/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

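/* Ordering relation for MemoryRegionIoeventfd: compare by address range
 * first, then match_data, then the data value (only when match_data is set),
 * and finally by the notifier pointer.  Used to compare and diff ioeventfd
 * sets.
 */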
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(void)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

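/* Take a reference on @view.  Returns false if the reference count had
 * already dropped to zero (the view is being destroyed); callers such as
 * address_space_get_flatview() then retry with the current map.
 */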
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

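/* Byte-swap @data when the device's declared endianness does not match the
 * target's byte order.
 */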
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

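/* Accessor callbacks used by access_with_adjusted_size().  Each performs a
 * single device access of @size bytes and merges the result into (or extracts
 * the value to write from) *value at the given shift and mask.
 */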
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

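/* Split an access of @size bytes into one or more accesses that respect the
 * device's minimum/maximum access size, calling @access_fn for each chunk
 * and accumulating the MemTxResult.
 */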
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   unsigned shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

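/* Walk up to the root of the region hierarchy and return the address space
 * rooted there, or NULL if the region is not part of any address space.
 */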
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = flatview_new();

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

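/* Return a referenced FlatView of @as's current memory map, retrying if the
 * map is replaced concurrently.  The caller must release it with
 * flatview_unref().
 */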
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = atomic_rcu_read(&as->current_map);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

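/* Recompute the set of ioeventfds visible through @as's flat view and
 * add/delete the ones that changed via the address space's listeners.
 */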
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);
    int i;

    mem_begin(as);
    for (i = 0; i < new_view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&new_view->ranges[i], as);
        mem_add(as, &mrs);
    }
    mem_commit(as);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        address_space_update_topology_pass(as, old_view, new_view, false);
        address_space_update_topology_pass(as, old_view, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

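/* Memory transactions batch topology changes: updates made between begin()
 * and the matching outermost commit() are folded into a single listener and
 * ioeventfd update.
 */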
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

969{
970 const char *p;
971 char *escaped, *q;
972 uint8_t c;
973 size_t bytes = 0;
974
975 for (p = name; *p; p++) {
976 bytes += memory_region_need_escape(*p) ? 4 : 1;
977 }
978 if (bytes == p - name) {
979 return g_memdup(name, bytes + 1);
980 }
981
982 escaped = g_malloc(bytes + 1);
983 for (p = name, q = escaped; *p; p++) {
984 c = *p;
985 if (unlikely(memory_region_need_escape(c))) {
986 *q++ = '\\';
987 *q++ = 'x';
988 *q++ = "0123456789abcdef"[c >> 4];
989 c = "0123456789abcdef"[c & 15];
990 }
991 *q++ = c;
992 }
993 *q = 0;
994 return escaped;
995}
996
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +1000997static void memory_region_do_init(MemoryRegion *mr,
998 Object *owner,
999 const char *name,
1000 uint64_t size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001001{
Avi Kivity08dafab2011-10-16 13:19:17 +02001002 mr->size = int128_make64(size);
1003 if (size == UINT64_MAX) {
1004 mr->size = int128_2_64();
1005 }
Peter Maydell302fa282014-08-19 20:05:46 +01001006 mr->name = g_strdup(name);
Paolo Bonzini612263c2015-12-09 11:44:25 +01001007 mr->owner = owner;
Gonglei58eaa212016-02-22 16:34:55 +08001008 mr->ram_block = NULL;
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001009
1010 if (name) {
Peter Crosthwaite843ef732014-08-19 23:56:26 -07001011 char *escaped_name = memory_region_escape_name(name);
1012 char *name_array = g_strdup_printf("%s[*]", escaped_name);
Paolo Bonzini612263c2015-12-09 11:44:25 +01001013
1014 if (!owner) {
1015 owner = container_get(qdev_get_machine(), "/unattached");
1016 }
1017
Peter Crosthwaite843ef732014-08-19 23:56:26 -07001018 object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001019 object_unref(OBJECT(mr));
Peter Crosthwaite843ef732014-08-19 23:56:26 -07001020 g_free(name_array);
1021 g_free(escaped_name);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001022 }
1023}
1024
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001025void memory_region_init(MemoryRegion *mr,
1026 Object *owner,
1027 const char *name,
1028 uint64_t size)
1029{
1030 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1031 memory_region_do_init(mr, owner, name, size);
1032}
1033
Eric Blaked7bce992016-01-29 06:48:55 -07001034static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
1035 void *opaque, Error **errp)
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001036{
1037 MemoryRegion *mr = MEMORY_REGION(obj);
1038 uint64_t value = mr->addr;
1039
Eric Blake51e72bc2016-01-29 06:48:54 -07001040 visit_type_uint64(v, name, &value, errp);
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001041}
1042
Eric Blaked7bce992016-01-29 06:48:55 -07001043static void memory_region_get_container(Object *obj, Visitor *v,
1044 const char *name, void *opaque,
1045 Error **errp)
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001046{
1047 MemoryRegion *mr = MEMORY_REGION(obj);
1048 gchar *path = (gchar *)"";
1049
1050 if (mr->container) {
1051 path = object_get_canonical_path(OBJECT(mr->container));
1052 }
Eric Blake51e72bc2016-01-29 06:48:54 -07001053 visit_type_str(v, name, &path, errp);
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001054 if (mr->container) {
1055 g_free(path);
1056 }
1057}
1058
1059static Object *memory_region_resolve_container(Object *obj, void *opaque,
1060 const char *part)
1061{
1062 MemoryRegion *mr = MEMORY_REGION(obj);
1063
1064 return OBJECT(mr->container);
1065}
1066
Eric Blaked7bce992016-01-29 06:48:55 -07001067static void memory_region_get_priority(Object *obj, Visitor *v,
1068 const char *name, void *opaque,
1069 Error **errp)
Peter Crosthwaited33382d2014-06-05 23:17:01 -07001070{
1071 MemoryRegion *mr = MEMORY_REGION(obj);
1072 int32_t value = mr->priority;
1073
Eric Blake51e72bc2016-01-29 06:48:54 -07001074 visit_type_int32(v, name, &value, errp);
Peter Crosthwaited33382d2014-06-05 23:17:01 -07001075}
1076
Eric Blaked7bce992016-01-29 06:48:55 -07001077static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1078 void *opaque, Error **errp)
Peter Crosthwaite52aef7b2014-06-05 23:17:35 -07001079{
1080 MemoryRegion *mr = MEMORY_REGION(obj);
1081 uint64_t value = memory_region_size(mr);
1082
Eric Blake51e72bc2016-01-29 06:48:54 -07001083 visit_type_uint64(v, name, &value, errp);
Peter Crosthwaite52aef7b2014-06-05 23:17:35 -07001084}
1085
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001086static void memory_region_initfn(Object *obj)
1087{
1088 MemoryRegion *mr = MEMORY_REGION(obj);
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001089 ObjectProperty *op;
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001090
1091 mr->ops = &unassigned_mem_ops;
1092 mr->enabled = true;
1093 mr->romd_mode = true;
Jan Kiszka196ea132015-06-18 18:47:20 +02001094 mr->global_locking = true;
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001095 mr->destructor = memory_region_destructor_none;
1096 QTAILQ_INIT(&mr->subregions);
1097 QTAILQ_INIT(&mr->coalesced);
Peter Crosthwaite409ddd02014-06-05 23:16:27 -07001098
1099 op = object_property_add(OBJECT(mr), "container",
1100 "link<" TYPE_MEMORY_REGION ">",
1101 memory_region_get_container,
1102 NULL, /* memory_region_set_container */
1103 NULL, NULL, &error_abort);
1104 op->resolve = memory_region_resolve_container;
1105
1106 object_property_add(OBJECT(mr), "addr", "uint64",
1107 memory_region_get_addr,
1108 NULL, /* memory_region_set_addr */
1109 NULL, NULL, &error_abort);
Peter Crosthwaited33382d2014-06-05 23:17:01 -07001110 object_property_add(OBJECT(mr), "priority", "uint32",
1111 memory_region_get_priority,
1112 NULL, /* memory_region_set_priority */
1113 NULL, NULL, &error_abort);
Peter Crosthwaite52aef7b2014-06-05 23:17:35 -07001114 object_property_add(OBJECT(mr), "size", "uint64",
1115 memory_region_get_size,
1116 NULL, /* memory_region_set_size, */
1117 NULL, NULL, &error_abort);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001118}
1119
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001120static void iommu_memory_region_initfn(Object *obj)
1121{
1122 MemoryRegion *mr = MEMORY_REGION(obj);
1123
1124 mr->is_iommu = true;
1125}
1126
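/* Handlers for accesses that hit no memory region: report an unassigned
 * access to the current CPU (if any); reads return 0 and writes are ignored.
 */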
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

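/* RAM device regions (e.g. device memory mapped into the guest) dispatch
 * reads and writes through these callbacks, which access the backing
 * RAMBlock's host memory directly.
 */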
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

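/* Check that an access is acceptable to the region: alignment plus the
 * device's .valid constraints, including the optional .accepts callback
 * applied per access-sized chunk.
 */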
Paolo Bonzinid2702032013-05-24 11:55:06 +02001226bool memory_region_access_valid(MemoryRegion *mr,
1227 hwaddr addr,
1228 unsigned size,
1229 bool is_write)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001230{
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001231 int access_size_min, access_size_max;
1232 int access_size, i;
Avi Kivity897fa7c2011-11-13 13:05:27 +02001233
Avi Kivity093bc2c2011-07-26 14:26:01 +03001234 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1235 return false;
1236 }
1237
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001238 if (!mr->ops->valid.accepts) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001239 return true;
1240 }
1241
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001242 access_size_min = mr->ops->valid.min_access_size;
1243 if (!mr->ops->valid.min_access_size) {
1244 access_size_min = 1;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001245 }
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001246
1247 access_size_max = mr->ops->valid.max_access_size;
1248 if (!mr->ops->valid.max_access_size) {
1249 access_size_max = 4;
1250 }
1251
1252 access_size = MAX(MIN(size, access_size_max), access_size_min);
1253 for (i = 0; i < size; i += access_size) {
1254 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
1255 is_write)) {
1256 return false;
1257 }
1258 }
1259
Avi Kivity093bc2c2011-07-26 14:26:01 +03001260 return true;
1261}
1262
Peter Maydellcc05c432015-04-26 16:49:23 +01001263static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1264 hwaddr addr,
1265 uint64_t *pval,
1266 unsigned size,
1267 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001268{
Peter Maydellcc05c432015-04-26 16:49:23 +01001269 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001270
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001271 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001272 return access_with_adjusted_size(addr, pval, size,
1273 mr->ops->impl.min_access_size,
1274 mr->ops->impl.max_access_size,
1275 memory_region_read_accessor,
1276 mr, attrs);
1277 } else if (mr->ops->read_with_attrs) {
1278 return access_with_adjusted_size(addr, pval, size,
1279 mr->ops->impl.min_access_size,
1280 mr->ops->impl.max_access_size,
1281 memory_region_read_with_attrs_accessor,
1282 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001283 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001284 return access_with_adjusted_size(addr, pval, size, 1, 4,
1285 memory_region_oldmmio_read_accessor,
1286 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001287 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001288}
1289
Peter Maydell3b643492015-04-26 16:49:23 +01001290MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1291 hwaddr addr,
1292 uint64_t *pval,
1293 unsigned size,
1294 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001295{
Peter Maydellcc05c432015-04-26 16:49:23 +01001296 MemTxResult r;
1297
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001298 if (!memory_region_access_valid(mr, addr, size, false)) {
1299 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001300 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001301 }
Avi Kivitya621f382012-01-02 13:12:08 +02001302
Peter Maydellcc05c432015-04-26 16:49:23 +01001303 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001304 adjust_endianness(mr, pval, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001305 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001306}
1307
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001308/* Return true if an eventfd was signalled */
1309static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1310 hwaddr addr,
1311 uint64_t data,
1312 unsigned size,
1313 MemTxAttrs attrs)
1314{
1315 MemoryRegionIoeventfd ioeventfd = {
1316 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1317 .data = data,
1318 };
1319 unsigned i;
1320
1321 for (i = 0; i < mr->ioeventfd_nb; i++) {
1322 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1323 ioeventfd.e = mr->ioeventfds[i].e;
1324
1325 if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
1326 event_notifier_set(ioeventfd.e);
1327 return true;
1328 }
1329 }
1330
1331 return false;
1332}
1333
Peter Maydell3b643492015-04-26 16:49:23 +01001334MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1335 hwaddr addr,
1336 uint64_t data,
1337 unsigned size,
1338 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001339{
Avi Kivity897fa7c2011-11-13 13:05:27 +02001340 if (!memory_region_access_valid(mr, addr, size, true)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001341 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001342 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001343 }
1344
Avi Kivitya621f382012-01-02 13:12:08 +02001345 adjust_endianness(mr, &data, size);
1346
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001347 if ((!kvm_eventfds_enabled()) &&
1348 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1349 return MEMTX_OK;
1350 }
1351
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001352 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001353 return access_with_adjusted_size(addr, &data, size,
1354 mr->ops->impl.min_access_size,
1355 mr->ops->impl.max_access_size,
1356 memory_region_write_accessor, mr,
1357 attrs);
1358 } else if (mr->ops->write_with_attrs) {
1359 return
1360 access_with_adjusted_size(addr, &data, size,
1361 mr->ops->impl.min_access_size,
1362 mr->ops->impl.max_access_size,
1363 memory_region_write_with_attrs_accessor,
1364 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001365 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001366 return access_with_adjusted_size(addr, &data, size, 1, 4,
1367 memory_region_oldmmio_write_accessor,
1368 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001369 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001370}
1371
Avi Kivity093bc2c2011-07-26 14:26:01 +03001372void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001373 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001374 const MemoryRegionOps *ops,
1375 void *opaque,
1376 const char *name,
1377 uint64_t size)
1378{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001379 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001380 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001381 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001382 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001383}
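
/*
 * Illustrative sketch (not part of this file): a minimal MMIO device
 * registering its register window with memory_region_init_io().  The
 * "FooState", "foo_read", "foo_write" and "foo_ops" names are hypothetical.
 *
 *     static uint64_t foo_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         FooState *s = opaque;
 *         return s->regs[addr >> 2];          // one 32-bit register per word
 *     }
 *
 *     static void foo_write(void *opaque, hwaddr addr, uint64_t val,
 *                           unsigned size)
 *     {
 *         FooState *s = opaque;
 *         s->regs[addr >> 2] = val;
 *     }
 *
 *     static const MemoryRegionOps foo_ops = {
 *         .read = foo_read,
 *         .write = foo_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     // typically called from the device's realize or instance_init:
 *     memory_region_init_io(&s->iomem, OBJECT(s), &foo_ops, s,
 *                           "foo-regs", 0x1000);
 */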
1384
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001385void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1386 Object *owner,
1387 const char *name,
1388 uint64_t size,
1389 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001390{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001391 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001392 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001393 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001394 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001395 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001396 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001397}
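
/*
 * Illustrative sketch (not part of this file): the usual pairing of the
 * _nomigrate RAM initializer with an explicit migration registration,
 * assuming a device that owns the region ("s", "foo.ram" and the size are
 * hypothetical):
 *
 *     memory_region_init_ram_nomigrate(&s->ram, OBJECT(s), "foo.ram",
 *                                      s->ram_size, &error_fatal);
 *     vmstate_register_ram(&s->ram, DEVICE(s));
 *
 * Callers that do not need to control the migration registration
 * themselves can use a wrapper that performs both steps.
 */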
1398
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001399void memory_region_init_resizeable_ram(MemoryRegion *mr,
1400 Object *owner,
1401 const char *name,
1402 uint64_t size,
1403 uint64_t max_size,
1404 void (*resized)(const char*,
1405 uint64_t length,
1406 void *host),
1407 Error **errp)
1408{
1409 memory_region_init(mr, owner, name, size);
1410 mr->ram = true;
1411 mr->terminates = true;
1412 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001413 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1414 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001415 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001416}
1417
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001418#ifdef __linux__
1419void memory_region_init_ram_from_file(MemoryRegion *mr,
1420 struct Object *owner,
1421 const char *name,
1422 uint64_t size,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001423 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001424 const char *path,
1425 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001426{
1427 memory_region_init(mr, owner, name, size);
1428 mr->ram = true;
1429 mr->terminates = true;
1430 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001431 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001432 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001433}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001434
1435void memory_region_init_ram_from_fd(MemoryRegion *mr,
1436 struct Object *owner,
1437 const char *name,
1438 uint64_t size,
1439 bool share,
1440 int fd,
1441 Error **errp)
1442{
1443 memory_region_init(mr, owner, name, size);
1444 mr->ram = true;
1445 mr->terminates = true;
1446 mr->destructor = memory_region_destructor_ram;
1447 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1448 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1449}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001450#endif
1451
Avi Kivity093bc2c2011-07-26 14:26:01 +03001452void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001453 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001454 const char *name,
1455 uint64_t size,
1456 void *ptr)
1457{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001458 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001459 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001460 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001461 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001462 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001463
1464 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1465 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001466 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001467}
1468
Alex Williamson21e00fa2016-10-31 09:53:03 -06001469void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1470 Object *owner,
1471 const char *name,
1472 uint64_t size,
1473 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301474{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001475 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1476 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001477 mr->ops = &ram_device_mem_ops;
1478 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301479}
1480
Avi Kivity093bc2c2011-07-26 14:26:01 +03001481void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001482 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001483 const char *name,
1484 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001485 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001486 uint64_t size)
1487{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001488 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001489 mr->alias = orig;
1490 mr->alias_offset = offset;
1491}
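
/*
 * Illustrative sketch (not part of this file): exposing the low 1MB of a
 * larger RAM region a second time at address 0 (the names are hypothetical):
 *
 *     memory_region_init_alias(&s->ram_below_1m, OBJECT(s), "ram-below-1m",
 *                              &s->ram, 0, 0x100000);
 *     memory_region_add_subregion_overlap(get_system_memory(), 0,
 *                                         &s->ram_below_1m, 1);
 *
 * The alias itself owns no RAM; accesses are redirected into the target
 * region at alias_offset.
 */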
1492
Peter Maydellb59821a2017-07-07 15:42:50 +01001493void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1494 struct Object *owner,
1495 const char *name,
1496 uint64_t size,
1497 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001498{
1499 memory_region_init(mr, owner, name, size);
1500 mr->ram = true;
1501 mr->readonly = true;
1502 mr->terminates = true;
1503 mr->destructor = memory_region_destructor_ram;
1504 mr->ram_block = qemu_ram_alloc(size, mr, errp);
1505 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1506}
1507
Peter Maydellb59821a2017-07-07 15:42:50 +01001508void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1509 Object *owner,
1510 const MemoryRegionOps *ops,
1511 void *opaque,
1512 const char *name,
1513 uint64_t size,
1514 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001515{
Peter Maydell39e0b032016-07-04 13:06:35 +01001516 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001517 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001518 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001519 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001520 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001521 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001522 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001523 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001524}
1525
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001526void memory_region_init_iommu(void *_iommu_mr,
1527 size_t instance_size,
1528 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001529 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001530 const char *name,
1531 uint64_t size)
1532{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001533 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001534 struct MemoryRegion *mr;
1535
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001536 object_initialize(_iommu_mr, instance_size, mrtypename);
1537 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001538 memory_region_do_init(mr, owner, name, size);
1539 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001540 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001541 QLIST_INIT(&iommu_mr->iommu_notify);
1542 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001543}
1544
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001545static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001546{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001547 MemoryRegion *mr = MEMORY_REGION(obj);
1548
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001549 assert(!mr->container);
1550
1551 /* We know the region is not visible in any address space (it
1552 * does not have a container and cannot be a root either because
 1553 * it has no references), so we can blindly clear mr->enabled.
1554 * memory_region_set_enabled instead could trigger a transaction
1555 * and cause an infinite loop.
1556 */
1557 mr->enabled = false;
1558 memory_region_transaction_begin();
1559 while (!QTAILQ_EMPTY(&mr->subregions)) {
1560 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1561 memory_region_del_subregion(mr, subregion);
1562 }
1563 memory_region_transaction_commit();
1564
Avi Kivity545e92e2011-08-08 19:58:48 +03001565 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001566 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001567 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001568 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001569}
1570
Paolo Bonzini803c0812013-05-07 06:59:09 +02001571Object *memory_region_owner(MemoryRegion *mr)
1572{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001573 Object *obj = OBJECT(mr);
1574 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001575}
1576
Paolo Bonzini46637be2013-05-07 09:06:00 +02001577void memory_region_ref(MemoryRegion *mr)
1578{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001579 /* MMIO callbacks most likely will access data that belongs
1580 * to the owner, hence the need to ref/unref the owner whenever
1581 * the memory region is in use.
1582 *
1583 * The memory region is a child of its owner. As long as the
1584 * owner doesn't call unparent itself on the memory region,
1585 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001586 * Memory regions without an owner are supposed to never go away;
 1587 * we do not ref/unref them because it slows down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001588 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001589 if (mr && mr->owner) {
1590 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001591 }
1592}
1593
1594void memory_region_unref(MemoryRegion *mr)
1595{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001596 if (mr && mr->owner) {
1597 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001598 }
1599}
1600
Avi Kivity093bc2c2011-07-26 14:26:01 +03001601uint64_t memory_region_size(MemoryRegion *mr)
1602{
Avi Kivity08dafab2011-10-16 13:19:17 +02001603 if (int128_eq(mr->size, int128_2_64())) {
1604 return UINT64_MAX;
1605 }
1606 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001607}
1608
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001609const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001610{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001611 if (!mr->name) {
1612 ((MemoryRegion *)mr)->name =
1613 object_get_canonical_path_component(OBJECT(mr));
1614 }
Peter Maydell302fa282014-08-19 20:05:46 +01001615 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001616}
1617
Alex Williamson21e00fa2016-10-31 09:53:03 -06001618bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301619{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001620 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301621}
1622
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001623uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001624{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001625 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001626 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001627 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1628 }
1629 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001630}
1631
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001632bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1633{
1634 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1635}
1636
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001637static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001638{
1639 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1640 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001641 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001642
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001643 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001644 flags |= iommu_notifier->notifier_flags;
1645 }
1646
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001647 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1648 imrc->notify_flag_changed(iommu_mr,
1649 iommu_mr->iommu_notify_flags,
1650 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001651 }
1652
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001653 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001654}
1655
Peter Xucdb30812016-09-23 13:02:26 +08001656void memory_region_register_iommu_notifier(MemoryRegion *mr,
1657 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001658{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001659 IOMMUMemoryRegion *iommu_mr;
1660
Jason Wangefcd38c2016-12-30 18:09:17 +08001661 if (mr->alias) {
1662 memory_region_register_iommu_notifier(mr->alias, n);
1663 return;
1664 }
1665
Peter Xucdb30812016-09-23 13:02:26 +08001666 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001667 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001668 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001669 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001670 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1671 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001672}
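
/*
 * Illustrative sketch (not part of this file): how a VFIO-style consumer
 * might register for MAP/UNMAP events on an IOMMU region (the "giommu"
 * structure and the callback name are hypothetical):
 *
 *     static void foo_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *     {
 *         // program the host IOMMU from iotlb->iova/translated_addr/perm
 *     }
 *
 *     iommu_notifier_init(&giommu->n, foo_iommu_map_notify,
 *                         IOMMU_NOTIFIER_ALL,
 *                         0, memory_region_size(mr) - 1);
 *     memory_region_register_iommu_notifier(mr, &giommu->n);
 */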
1673
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001674uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001675{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001676 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1677
1678 if (imrc->get_min_page_size) {
1679 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001680 }
1681 return TARGET_PAGE_SIZE;
1682}
1683
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001684void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001685{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001686 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001687 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001688 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001689 IOMMUTLBEntry iotlb;
1690
Peter Xufaa362e2017-04-07 18:59:11 +08001691 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001692 if (imrc->replay) {
1693 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001694 return;
1695 }
1696
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001697 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001698
David Gibsona788f222015-09-30 12:13:55 +10001699 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001700 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001701 if (iotlb.perm != IOMMU_NONE) {
1702 n->notify(n, &iotlb);
1703 }
1704
1705 /* if (2^64 - MR size) < granularity, it's possible to get an
1706 * infinite loop here. This should catch such a wraparound */
1707 if ((addr + granularity) < addr) {
1708 break;
1709 }
1710 }
1711}
1712
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001713void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001714{
1715 IOMMUNotifier *notifier;
1716
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001717 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1718 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001719 }
1720}
1721
Peter Xucdb30812016-09-23 13:02:26 +08001722void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1723 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001724{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001725 IOMMUMemoryRegion *iommu_mr;
1726
Jason Wangefcd38c2016-12-30 18:09:17 +08001727 if (mr->alias) {
1728 memory_region_unregister_iommu_notifier(mr->alias, n);
1729 return;
1730 }
Peter Xucdb30812016-09-23 13:02:26 +08001731 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001732 iommu_mr = IOMMU_MEMORY_REGION(mr);
1733 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001734}
1735
Peter Xubd2bfa42017-04-07 18:59:10 +08001736void memory_region_notify_one(IOMMUNotifier *notifier,
1737 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001738{
Peter Xucdb30812016-09-23 13:02:26 +08001739 IOMMUNotifierFlag request_flags;
1740
Peter Xubd2bfa42017-04-07 18:59:10 +08001741 /*
 1742 * Skip the notification if it does not overlap with the
 1743 * notifier's registered range.
1744 */
1745 if (notifier->start > entry->iova + entry->addr_mask + 1 ||
1746 notifier->end < entry->iova) {
1747 return;
1748 }
Peter Xucdb30812016-09-23 13:02:26 +08001749
Peter Xubd2bfa42017-04-07 18:59:10 +08001750 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001751 request_flags = IOMMU_NOTIFIER_MAP;
1752 } else {
1753 request_flags = IOMMU_NOTIFIER_UNMAP;
1754 }
1755
Peter Xubd2bfa42017-04-07 18:59:10 +08001756 if (notifier->notifier_flags & request_flags) {
1757 notifier->notify(notifier, entry);
1758 }
1759}
1760
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001761void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001762 IOMMUTLBEntry entry)
1763{
1764 IOMMUNotifier *iommu_notifier;
1765
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001766 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001767
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001768 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001769 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001770 }
David Gibson06866572013-05-14 19:13:56 +10001771}
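
/*
 * Illustrative sketch (not part of this file): an IOMMU implementation
 * typically calls memory_region_notify_iommu() when a guest mapping is
 * torn down, e.g. (fields filled with hypothetical values):
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(page_size - 1),
 *         .translated_addr = 0,
 *         .addr_mask = page_size - 1,
 *         .perm = IOMMU_NONE,              // an unmap notification
 *     };
 *     memory_region_notify_iommu(iommu_mr, entry);
 */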
1772
Avi Kivity093bc2c2011-07-26 14:26:01 +03001773void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1774{
Avi Kivity5a583342011-07-26 14:26:02 +03001775 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001776 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001777
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001778 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001779 old_logging = mr->vga_logging_count;
1780 mr->vga_logging_count += log ? 1 : -1;
1781 if (!!old_logging == !!mr->vga_logging_count) {
1782 return;
1783 }
1784
Jan Kiszka59023ef2012-08-23 13:02:30 +02001785 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001786 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001787 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001788 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001789}
1790
Avi Kivitya8170e52012-10-23 12:30:10 +02001791bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1792 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001793{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001794 assert(mr->ram_block);
1795 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1796 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001797}
1798
Avi Kivitya8170e52012-10-23 12:30:10 +02001799void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1800 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001801{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001802 assert(mr->ram_block);
1803 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1804 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001805 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001806}
1807
Juan Quintela6c279db2012-10-17 20:24:28 +02001808bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1809 hwaddr size, unsigned client)
1810{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001811 assert(mr->ram_block);
1812 return cpu_physical_memory_test_and_clear_dirty(
1813 memory_region_get_ram_addr(mr) + addr, size, client);
Juan Quintela6c279db2012-10-17 20:24:28 +02001814}
1815
Gerd Hoffmann8deaf122017-04-21 11:16:25 +02001816DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1817 hwaddr addr,
1818 hwaddr size,
1819 unsigned client)
1820{
1821 assert(mr->ram_block);
1822 return cpu_physical_memory_snapshot_and_clear_dirty(
1823 memory_region_get_ram_addr(mr) + addr, size, client);
1824}
1825
1826bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1827 hwaddr addr, hwaddr size)
1828{
1829 assert(mr->ram_block);
1830 return cpu_physical_memory_snapshot_get_dirty(snap,
1831 memory_region_get_ram_addr(mr) + addr, size);
1832}
Juan Quintela6c279db2012-10-17 20:24:28 +02001833
Avi Kivity093bc2c2011-07-26 14:26:01 +03001834void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1835{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001836 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001837 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001838 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001839 FlatRange *fr;
1840
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001841 /* If the same address space has multiple log_sync listeners, we
1842 * visit that address space's FlatView multiple times. But because
1843 * log_sync listeners are rare, it's still cheaper than walking each
1844 * address space once.
1845 */
1846 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1847 if (!listener->log_sync) {
1848 continue;
1849 }
1850 as = listener->address_space;
1851 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001852 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity0d673e32012-10-02 15:28:50 +02001853 if (fr->mr == mr) {
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001854 MemoryRegionSection mrs = section_from_flat_range(fr, as);
1855 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02001856 }
Avi Kivity5a583342011-07-26 14:26:02 +03001857 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001858 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03001859 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001860}
1861
1862void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1863{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001864 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001865 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001866 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01001867 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001868 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001869 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001870}
1871
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001872void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001873{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001874 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001875 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001876 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01001877 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001878 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001879 }
1880}
1881
Avi Kivitya8170e52012-10-23 12:30:10 +02001882void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1883 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001884{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001885 assert(mr->ram_block);
1886 cpu_physical_memory_test_and_clear_dirty(
1887 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001888}
1889
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001890int memory_region_get_fd(MemoryRegion *mr)
1891{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001892 int fd;
1893
1894 rcu_read_lock();
1895 while (mr->alias) {
1896 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001897 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001898 fd = mr->ram_block->fd;
1899 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001900
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001901 return fd;
1902}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001903
Avi Kivity093bc2c2011-07-26 14:26:01 +03001904void *memory_region_get_ram_ptr(MemoryRegion *mr)
1905{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001906 void *ptr;
1907 uint64_t offset = 0;
1908
1909 rcu_read_lock();
1910 while (mr->alias) {
1911 offset += mr->alias_offset;
1912 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001913 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08001914 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001915 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001916 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001917
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001918 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001919}
1920
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001921MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
1922{
1923 RAMBlock *block;
1924
1925 block = qemu_ram_block_from_host(ptr, false, offset);
1926 if (!block) {
1927 return NULL;
1928 }
1929
1930 return block->mr;
1931}
1932
Fam Zheng7ebb2742016-03-01 14:18:20 +08001933ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
1934{
1935 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
1936}
1937
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001938void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
1939{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001940 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001941
Gongleifa53a0e2016-05-10 10:04:59 +08001942 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001943}
1944
Avi Kivity0d673e32012-10-02 15:28:50 +02001945static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001946{
Paolo Bonzini99e86342013-05-06 10:26:13 +02001947 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001948 FlatRange *fr;
1949 CoalescedMemoryRange *cmr;
1950 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02001951 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001952
Paolo Bonzini856d7242013-05-06 11:57:21 +02001953 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001954 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001955 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02001956 section = (MemoryRegionSection) {
Avi Kivityf6790af2012-10-02 20:13:51 +02001957 .address_space = as,
Avi Kivity95d29942012-10-02 18:21:54 +02001958 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001959 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02001960 };
1961
Paolo Bonzini9a546352016-09-22 16:23:06 +02001962 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02001963 int128_get64(fr->addr.start),
1964 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001965 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
1966 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02001967 int128_sub(fr->addr.start,
1968 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001969 if (!addrrange_intersects(tmp, fr->addr)) {
1970 continue;
1971 }
1972 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02001973 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02001974 int128_get64(tmp.start),
1975 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001976 }
1977 }
1978 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001979 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001980}
1981
Avi Kivity0d673e32012-10-02 15:28:50 +02001982static void memory_region_update_coalesced_range(MemoryRegion *mr)
1983{
1984 AddressSpace *as;
1985
1986 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1987 memory_region_update_coalesced_range_as(mr, as);
1988 }
1989}
1990
Avi Kivity093bc2c2011-07-26 14:26:01 +03001991void memory_region_set_coalescing(MemoryRegion *mr)
1992{
1993 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02001994 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001995}
1996
1997void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001998 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001999 uint64_t size)
2000{
Anthony Liguori7267c092011-08-20 22:09:37 -05002001 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002002
Avi Kivity08dafab2011-10-16 13:19:17 +02002003 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002004 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2005 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02002006 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002007}
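
/*
 * Illustrative sketch (not part of this file): marking a write-mostly
 * sub-range of an MMIO window (say a command FIFO at offset 0x400,
 * 0x100 bytes long -- hypothetical numbers) as coalesced so that writes
 * can be batched:
 *
 *     memory_region_add_coalescing(&s->iomem, 0x400, 0x100);
 *
 * or, to coalesce the whole region:
 *
 *     memory_region_set_coalescing(&s->iomem);
 */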
2008
2009void memory_region_clear_coalescing(MemoryRegion *mr)
2010{
2011 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002012 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002013
Jan Kiszkad4105152012-08-23 13:02:29 +02002014 qemu_flush_coalesced_mmio_buffer();
2015 mr->flush_coalesced_mmio = false;
2016
Avi Kivity093bc2c2011-07-26 14:26:01 +03002017 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2018 cmr = QTAILQ_FIRST(&mr->coalesced);
2019 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002020 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002021 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002022 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002023
2024 if (updated) {
2025 memory_region_update_coalesced_range(mr);
2026 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002027}
2028
Jan Kiszkad4105152012-08-23 13:02:29 +02002029void memory_region_set_flush_coalesced(MemoryRegion *mr)
2030{
2031 mr->flush_coalesced_mmio = true;
2032}
2033
2034void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2035{
2036 qemu_flush_coalesced_mmio_buffer();
2037 if (QTAILQ_EMPTY(&mr->coalesced)) {
2038 mr->flush_coalesced_mmio = false;
2039 }
2040}
2041
Jan Kiszka196ea132015-06-18 18:47:20 +02002042void memory_region_set_global_locking(MemoryRegion *mr)
2043{
2044 mr->global_locking = true;
2045}
2046
2047void memory_region_clear_global_locking(MemoryRegion *mr)
2048{
2049 mr->global_locking = false;
2050}
2051
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002052static bool userspace_eventfd_warning;
2053
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002054void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002055 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002056 unsigned size,
2057 bool match_data,
2058 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002059 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002060{
2061 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002062 .addr.start = int128_make64(addr),
2063 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002064 .match_data = match_data,
2065 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002066 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002067 };
2068 unsigned i;
2069
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002070 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2071 userspace_eventfd_warning))) {
2072 userspace_eventfd_warning = true;
2073 error_report("Using eventfd without MMIO binding in KVM. "
2074 "Suboptimal performance expected");
2075 }
2076
Jason Wangb8aecea2015-11-06 16:02:45 +08002077 if (size) {
2078 adjust_endianness(mr, &mrfd.data, size);
2079 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002080 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002081 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2082 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2083 break;
2084 }
2085 }
2086 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002087 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002088 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2089 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2090 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2091 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002092 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002093 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002094}
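
/*
 * Illustrative sketch (not part of this file): wiring a doorbell register
 * so that a 2-byte write of a queue index is handled through an eventfd
 * instead of a heavyweight MMIO exit (virtio-PCI style; the names are
 * hypothetical):
 *
 *     event_notifier_init(&s->notifier, 0);
 *     memory_region_add_eventfd(&s->iomem,
 *                               FOO_DOORBELL_OFFSET,  // addr within the region
 *                               2,                    // size of the access
 *                               true,                 // only match this value
 *                               queue_index,          // data to match
 *                               &s->notifier);
 *
 * The matching memory_region_del_eventfd() call must pass the same
 * parameters when the binding is removed.
 */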
2095
2096void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002097 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002098 unsigned size,
2099 bool match_data,
2100 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002101 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002102{
2103 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002104 .addr.start = int128_make64(addr),
2105 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002106 .match_data = match_data,
2107 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002108 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002109 };
2110 unsigned i;
2111
Jason Wangb8aecea2015-11-06 16:02:45 +08002112 if (size) {
2113 adjust_endianness(mr, &mrfd.data, size);
2114 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002115 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002116 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2117 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2118 break;
2119 }
2120 }
2121 assert(i != mr->ioeventfd_nb);
2122 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2123 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2124 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002125 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002126 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002127 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002128 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002129}
2130
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002131static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002132{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002133 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002134 MemoryRegion *other;
2135
Jan Kiszka59023ef2012-08-23 13:02:30 +02002136 memory_region_transaction_begin();
2137
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002138 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002139 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002140 if (subregion->priority >= other->priority) {
2141 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2142 goto done;
2143 }
2144 }
2145 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2146done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002147 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002148 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002149}
2150
Peter Crosthwaite05987012014-06-05 23:14:44 -07002151static void memory_region_add_subregion_common(MemoryRegion *mr,
2152 hwaddr offset,
2153 MemoryRegion *subregion)
2154{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002155 assert(!subregion->container);
2156 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002157 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002158 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002159}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002160
2161void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002162 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002163 MemoryRegion *subregion)
2164{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002165 subregion->priority = 0;
2166 memory_region_add_subregion_common(mr, offset, subregion);
2167}
2168
2169void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002170 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002171 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002172 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002173{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002174 subregion->priority = priority;
2175 memory_region_add_subregion_common(mr, offset, subregion);
2176}
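
/*
 * Illustrative sketch (not part of this file): composing a board address
 * map out of subregions.  RAM is mapped at the default priority and a
 * small MMIO window is punched over it with a higher-priority overlap
 * (names and addresses are hypothetical):
 *
 *     memory_region_add_subregion(get_system_memory(), 0, &s->ram);
 *     memory_region_add_subregion_overlap(get_system_memory(), 0xfee00000,
 *                                         &s->apic_mmio, 1);
 *
 * Where two enabled regions cover the same address, the one with the
 * higher priority is the one that becomes visible.
 */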
2177
2178void memory_region_del_subregion(MemoryRegion *mr,
2179 MemoryRegion *subregion)
2180{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002181 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002182 assert(subregion->container == mr);
2183 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002184 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002185 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002186 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002187 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002188}
2189
2190void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2191{
2192 if (enabled == mr->enabled) {
2193 return;
2194 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002195 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002196 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002197 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002198 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002199}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002200
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002201void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2202{
2203 Int128 s = int128_make64(size);
2204
2205 if (size == UINT64_MAX) {
2206 s = int128_2_64();
2207 }
2208 if (int128_eq(s, mr->size)) {
2209 return;
2210 }
2211 memory_region_transaction_begin();
2212 mr->size = s;
2213 memory_region_update_pending = true;
2214 memory_region_transaction_commit();
2215}
2216
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002217static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002218{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002219 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002220
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002221 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002222 memory_region_transaction_begin();
2223 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002224 memory_region_del_subregion(container, mr);
2225 mr->container = container;
2226 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002227 memory_region_unref(mr);
2228 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002229 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002230}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002231
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002232void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2233{
2234 if (addr != mr->addr) {
2235 mr->addr = addr;
2236 memory_region_readd_subregion(mr);
2237 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002238}
2239
Avi Kivitya8170e52012-10-23 12:30:10 +02002240void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002241{
Avi Kivity47033592011-12-04 19:16:50 +02002242 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002243
Jan Kiszka59023ef2012-08-23 13:02:30 +02002244 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002245 return;
2246 }
2247
Jan Kiszka59023ef2012-08-23 13:02:30 +02002248 memory_region_transaction_begin();
2249 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002250 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002251 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002252}
2253
Igor Mammedova2b257d2014-10-31 16:38:37 +00002254uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2255{
2256 return mr->align;
2257}
2258
Avi Kivitye2177952011-12-08 15:00:18 +02002259static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2260{
2261 const AddrRange *addr = addr_;
2262 const FlatRange *fr = fr_;
2263
2264 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2265 return -1;
2266 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2267 return 1;
2268 }
2269 return 0;
2270}
2271
Paolo Bonzini99e86342013-05-06 10:26:13 +02002272static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002273{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002274 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002275 sizeof(FlatRange), cmp_flatrange_addr);
2276}
2277
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002278bool memory_region_is_mapped(MemoryRegion *mr)
2279{
2280 return mr->container ? true : false;
2281}
2282
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002283/* Same as memory_region_find, but it does not add a reference to the
2284 * returned region. It must be called from an RCU critical section.
2285 */
2286static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2287 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002288{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002289 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002290 MemoryRegion *root;
2291 AddressSpace *as;
2292 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002293 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002294 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002295
Paolo Bonzini73034e92013-05-07 15:48:28 +02002296 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002297 for (root = mr; root->container; ) {
2298 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002299 addr += root->addr;
2300 }
2301
2302 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002303 if (!as) {
2304 return ret;
2305 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002306 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002307
Paolo Bonzini2b647662013-05-17 12:40:44 +02002308 view = atomic_rcu_read(&as->current_map);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002309 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002310 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002311 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002312 }
2313
Paolo Bonzini99e86342013-05-06 10:26:13 +02002314 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002315 --fr;
2316 }
2317
2318 ret.mr = fr->mr;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002319 ret.address_space = as;
Avi Kivitye2177952011-12-08 15:00:18 +02002320 range = addrrange_intersection(range, fr->addr);
2321 ret.offset_within_region = fr->offset_in_region;
2322 ret.offset_within_region += int128_get64(int128_sub(range.start,
2323 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002324 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002325 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002326 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002327 return ret;
2328}
2329
2330MemoryRegionSection memory_region_find(MemoryRegion *mr,
2331 hwaddr addr, uint64_t size)
2332{
2333 MemoryRegionSection ret;
2334 rcu_read_lock();
2335 ret = memory_region_find_rcu(mr, addr, size);
2336 if (ret.mr) {
2337 memory_region_ref(ret.mr);
2338 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002339 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002340 return ret;
2341}
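
/*
 * Illustrative sketch (not part of this file): resolving what backs a
 * given offset of a container region and releasing the reference that
 * memory_region_find() takes:
 *
 *     MemoryRegionSection section = memory_region_find(container, addr, 4);
 *     if (section.mr) {
 *         // section.offset_within_region is the offset inside section.mr
 *         memory_region_unref(section.mr);
 *     }
 */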
2342
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002343bool memory_region_present(MemoryRegion *container, hwaddr addr)
2344{
2345 MemoryRegion *mr;
2346
2347 rcu_read_lock();
2348 mr = memory_region_find_rcu(container, addr, 1).mr;
2349 rcu_read_unlock();
2350 return mr && mr != container;
2351}
2352
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002353void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002354{
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002355 MemoryListener *listener;
2356 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002357 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002358 FlatRange *fr;
2359
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002360 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2361 if (!listener->log_sync) {
2362 continue;
2363 }
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002364 as = listener->address_space;
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002365 view = address_space_get_flatview(as);
2366 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonziniadaad612016-09-22 16:09:08 +02002367 if (fr->dirty_log_mask) {
2368 MemoryRegionSection mrs = section_from_flat_range(fr, as);
2369 listener->log_sync(listener, &mrs);
2370 }
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002371 }
2372 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002373 }
2374}
2375
Jay Zhou19310762017-07-28 18:28:53 +08002376static VMChangeStateEntry *vmstate_change;
2377
Avi Kivity7664e802011-12-11 14:47:25 +02002378void memory_global_dirty_log_start(void)
2379{
Jay Zhou19310762017-07-28 18:28:53 +08002380 if (vmstate_change) {
2381 qemu_del_vm_change_state_handler(vmstate_change);
2382 vmstate_change = NULL;
2383 }
2384
Avi Kivity7664e802011-12-11 14:47:25 +02002385 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002386
Avi Kivity7376e582012-02-08 21:05:17 +02002387 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002388
2389 /* Refresh DIRTY_LOG_MIGRATION bit. */
2390 memory_region_transaction_begin();
2391 memory_region_update_pending = true;
2392 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002393}
2394
Jay Zhou19310762017-07-28 18:28:53 +08002395static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002396{
Avi Kivity7664e802011-12-11 14:47:25 +02002397 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002398
2399 /* Refresh DIRTY_LOG_MIGRATION bit. */
2400 memory_region_transaction_begin();
2401 memory_region_update_pending = true;
2402 memory_region_transaction_commit();
2403
Avi Kivity7376e582012-02-08 21:05:17 +02002404 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002405}
2406
Jay Zhou19310762017-07-28 18:28:53 +08002407static void memory_vm_change_state_handler(void *opaque, int running,
2408 RunState state)
2409{
2410 if (running) {
2411 memory_global_dirty_log_do_stop();
2412
2413 if (vmstate_change) {
2414 qemu_del_vm_change_state_handler(vmstate_change);
2415 vmstate_change = NULL;
2416 }
2417 }
2418}
2419
2420void memory_global_dirty_log_stop(void)
2421{
2422 if (!runstate_is_running()) {
2423 if (vmstate_change) {
2424 return;
2425 }
2426 vmstate_change = qemu_add_vm_change_state_handler(
2427 memory_vm_change_state_handler, NULL);
2428 return;
2429 }
2430
2431 memory_global_dirty_log_do_stop();
2432}
2433
Avi Kivity7664e802011-12-11 14:47:25 +02002434static void listener_add_address_space(MemoryListener *listener,
2435 AddressSpace *as)
2436{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002437 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002438 FlatRange *fr;
2439
Paolo Bonzini680a4782015-11-02 09:23:52 +01002440 if (listener->begin) {
2441 listener->begin(listener);
2442 }
Avi Kivity7664e802011-12-11 14:47:25 +02002443 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002444 if (listener->log_global_start) {
2445 listener->log_global_start(listener);
2446 }
Avi Kivity7664e802011-12-11 14:47:25 +02002447 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002448
Paolo Bonzini856d7242013-05-06 11:57:21 +02002449 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002450 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity7664e802011-12-11 14:47:25 +02002451 MemoryRegionSection section = {
2452 .mr = fr->mr,
Avi Kivityf6790af2012-10-02 20:13:51 +02002453 .address_space = as,
Avi Kivity7664e802011-12-11 14:47:25 +02002454 .offset_within_region = fr->offset_in_region,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002455 .size = fr->addr.size,
Avi Kivity7664e802011-12-11 14:47:25 +02002456 .offset_within_address_space = int128_get64(fr->addr.start),
Avi Kivity7a8499e2012-02-08 17:01:23 +02002457 .readonly = fr->readonly,
Avi Kivity7664e802011-12-11 14:47:25 +02002458 };
Paolo Bonzini680a4782015-11-02 09:23:52 +01002459 if (fr->dirty_log_mask && listener->log_start) {
2460 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2461 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002462 if (listener->region_add) {
2463 listener->region_add(listener, &section);
2464 }
Avi Kivity7664e802011-12-11 14:47:25 +02002465 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002466 if (listener->commit) {
2467 listener->commit(listener);
2468 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002469 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002470}
2471
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002472void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002473{
Avi Kivity72e22d22012-02-08 15:05:50 +02002474 MemoryListener *other = NULL;
2475
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002476 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002477 if (QTAILQ_EMPTY(&memory_listeners)
2478 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2479 memory_listeners)->priority) {
2480 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2481 } else {
2482 QTAILQ_FOREACH(other, &memory_listeners, link) {
2483 if (listener->priority < other->priority) {
2484 break;
2485 }
2486 }
2487 QTAILQ_INSERT_BEFORE(other, listener, link);
2488 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002489
Paolo Bonzini9a546352016-09-22 16:23:06 +02002490 if (QTAILQ_EMPTY(&as->listeners)
2491 || listener->priority >= QTAILQ_LAST(&as->listeners,
2492 memory_listeners)->priority) {
2493 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2494 } else {
2495 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2496 if (listener->priority < other->priority) {
2497 break;
2498 }
2499 }
2500 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2501 }
2502
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002503 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002504}
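
/*
 * Illustrative sketch (not part of this file): a minimal listener that is
 * told about every range that becomes visible in the system address space
 * ("foo_region_add" and "foo_listener" are hypothetical names):
 *
 *     static void foo_region_add(MemoryListener *listener,
 *                                MemoryRegionSection *section)
 *     {
 *         // called for each FlatRange added to the address space's view
 *     }
 *
 *     static MemoryListener foo_listener = {
 *         .region_add = foo_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&foo_listener, &address_space_memory);
 *
 * Registration replays the current FlatView through region_add, so the
 * listener immediately sees all existing mappings.
 */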
2505
2506void memory_listener_unregister(MemoryListener *listener)
2507{
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002508 if (!listener->address_space) {
2509 return;
2510 }
2511
Avi Kivity72e22d22012-02-08 15:05:50 +02002512 QTAILQ_REMOVE(&memory_listeners, listener, link);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002513 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002514 listener->address_space = NULL;
Avi Kivity86e775c2011-12-15 16:24:49 +02002515}
Avi Kivitye2177952011-12-08 15:00:18 +02002516
KONRAD Fredericc9356742016-10-19 15:06:49 +02002517bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2518{
2519 void *host;
2520 unsigned size = 0;
2521 unsigned offset = 0;
2522 Object *new_interface;
2523
2524 if (!mr || !mr->ops->request_ptr) {
2525 return false;
2526 }
2527
2528 /*
 2529 * Avoid an update if the request_ptr callback calls
 2530 * memory_region_invalidate_mmio_ptr, which seems likely when we
 2531 * use a cache.
2532 */
2533 memory_region_transaction_begin();
2534
2535 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2536
2537 if (!host || !size) {
2538 memory_region_transaction_commit();
2539 return false;
2540 }
2541
2542 new_interface = object_new("mmio_interface");
2543 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
2544 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
2545 qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
2546 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
2547 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
2548 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
2549
2550 memory_region_transaction_commit();
2551 return true;
2552}
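
/*
 * Hedged sketch of the request_ptr contract, inferred from the call above
 * (the authoritative prototype lives in MemoryRegionOps): a device that can
 * expose a direct host pointer for part of its MMIO region would provide
 * something like
 *
 *     static void *my_request_ptr(void *opaque, hwaddr addr,
 *                                 unsigned *size, unsigned *offset)
 *     {
 *         MyDevice *d = opaque;        // illustrative type
 *
 *         *offset = 0;                 // window start within the region
 *         *size = d->cache_size;       // window length in bytes
 *         return d->cache_ptr;         // host pointer backing the window
 *     }
 *
 * and set .request_ptr = my_request_ptr in its MemoryRegionOps.  All names
 * above are illustrative; returning NULL (or a zero size) makes this
 * function bail out, as seen above.
 */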
2553
2554typedef struct MMIOPtrInvalidate {
2555 MemoryRegion *mr;
2556 hwaddr offset;
2557 unsigned size;
2558 int busy;
2559 int allocated;
2560} MMIOPtrInvalidate;
2561
2562#define MAX_MMIO_INVALIDATE 10
2563static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
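
/*
 * Invalidation requests are normally drawn from the small static pool above:
 * a slot is claimed by flipping its busy flag with atomic_cmpxchg().  If all
 * MAX_MMIO_INVALIDATE slots are in use, a heap entry is allocated instead and
 * marked "allocated" so that memory_region_do_invalidate_mmio_ptr() knows to
 * free it once the request has been processed.
 */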
2564
2565static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
2566 run_on_cpu_data data)
2567{
2568 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
2569 MemoryRegion *mr = invalidate_data->mr;
2570 hwaddr offset = invalidate_data->offset;
2571 unsigned size = invalidate_data->size;
2572 MemoryRegionSection section = memory_region_find(mr, offset, size);
2573
2574 qemu_mutex_lock_iothread();
2575
2576     /* Reset the dirty flag so this invalidation does not retrigger later. */
2577 cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
2578
2579 if (section.mr != mr) {
2580             /* memory_region_find() adds a ref on section.mr */
2581 memory_region_unref(section.mr);
2582 if (MMIO_INTERFACE(section.mr->owner)) {
2583             /* We found the mmio_interface; just drop it. */
2584 object_property_set_bool(section.mr->owner, false, "realized",
2585 NULL);
2586 object_unref(section.mr->owner);
2587 object_unparent(section.mr->owner);
2588 }
2589 }
2590
2591 qemu_mutex_unlock_iothread();
2592
2593 if (invalidate_data->allocated) {
2594 g_free(invalidate_data);
2595 } else {
2596 invalidate_data->busy = 0;
2597 }
2598}
2599
2600void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
2601 unsigned size)
2602{
2603 size_t i;
2604 MMIOPtrInvalidate *invalidate_data = NULL;
2605
2606 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
2607 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
2608 invalidate_data = &mmio_ptr_invalidate_list[i];
2609 break;
2610 }
2611 }
2612
2613 if (!invalidate_data) {
2614 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
2615 invalidate_data->allocated = 1;
2616 }
2617
2618 invalidate_data->mr = mr;
2619 invalidate_data->offset = offset;
2620 invalidate_data->size = size;
2621
2622 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
2623 RUN_ON_CPU_HOST_PTR(invalidate_data));
2624}
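
/*
 * Note (summary, not new behaviour): memory_region_invalidate_mmio_ptr()
 * only queues the work; the actual teardown of the matching mmio_interface
 * device happens in memory_region_do_invalidate_mmio_ptr(), run from a safe
 * context on first_cpu via async_safe_run_on_cpu().  The provider of the
 * host pointer is presumably expected to call this once that pointer can no
 * longer be trusted.
 */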
2625
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002626void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002627{
Paolo Bonziniac951902015-02-11 15:21:04 +01002628 memory_region_ref(root);
Jan Kiszka59023ef2012-08-23 13:02:30 +02002629 memory_region_transaction_begin();
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002630 as->ref_count = 1;
Avi Kivity8786db72012-10-02 13:53:41 +02002631 as->root = root;
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002632 as->malloced = false;
Alexey Kardashevskiycc94cd62017-09-21 18:50:55 +10002633 as->current_map = flatview_new();
Avi Kivity4c19eb72012-10-30 13:47:44 +02002634 as->ioeventfd_nb = 0;
2635 as->ioeventfds = NULL;
Paolo Bonzini9a546352016-09-22 16:23:06 +02002636 QTAILQ_INIT(&as->listeners);
Avi Kivity0d673e32012-10-02 15:28:50 +02002637 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002638 as->name = g_strdup(name ? name : "anonymous");
Alexey Kardashevskiy9a62e242017-09-21 18:50:54 +10002639 as->dispatch = NULL;
Paolo Bonzinif43793c2013-04-16 15:39:51 +02002640 memory_region_update_pending |= root->enabled;
2641 memory_region_transaction_commit();
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002642}
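
/*
 * Hedged usage sketch (illustrative names, not taken from this file):
 *
 *     MemoryRegion *root = g_new0(MemoryRegion, 1);
 *
 *     memory_region_init(root, OBJECT(dev), "my-dma-root", UINT64_MAX);
 *     address_space_init(&s->dma_as, root, "my-device-dma");
 *
 * address_space_init() takes a reference on @root, and the transaction
 * commit at the end of the function triggers the first topology update
 * (when root->enabled is set), so the address space is usable as soon as
 * the call returns.
 */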
Avi Kivity658b2222011-07-26 14:26:08 +03002643
Paolo Bonzini374f2982013-05-17 12:37:03 +02002644static void do_address_space_destroy(AddressSpace *as)
Avi Kivity83f3c252012-10-07 12:59:55 +02002645{
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002646 bool do_free = as->malloced;
David Gibson078c44f2014-05-30 12:59:00 -06002647
Avi Kivity83f3c252012-10-07 12:59:55 +02002648 address_space_destroy_dispatch(as);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002649 assert(QTAILQ_EMPTY(&as->listeners));
David Gibson078c44f2014-05-30 12:59:00 -06002650
Paolo Bonzini856d7242013-05-06 11:57:21 +02002651 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002652 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002653 g_free(as->ioeventfds);
Paolo Bonziniac951902015-02-11 15:21:04 +01002654 memory_region_unref(as->root);
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002655 if (do_free) {
2656 g_free(as);
2657 }
2658}
2659
2660AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
2661{
2662 AddressSpace *as;
2663
2664 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2665 if (root == as->root && as->malloced) {
2666 as->ref_count++;
2667 return as;
2668 }
2669 }
2670
2671 as = g_malloc0(sizeof *as);
2672 address_space_init(as, root, name);
2673 as->malloced = true;
2674 return as;
Avi Kivity83f3c252012-10-07 12:59:55 +02002675}
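
/*
 * Hedged sketch: callers that may share the same root region (several CPUs
 * pointing at the system memory region, for instance) can use
 *
 *     AddressSpace *as = address_space_init_shareable(root, "cpu-memory");
 *     ...
 *     address_space_destroy(as);
 *
 * Repeated calls with the same root return the already-malloced AddressSpace
 * with ref_count bumped; address_space_destroy() below only tears the
 * address space down once the last user has dropped its reference.  The
 * name above is illustrative.
 */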
2676
Paolo Bonzini374f2982013-05-17 12:37:03 +02002677void address_space_destroy(AddressSpace *as)
2678{
Paolo Bonziniac951902015-02-11 15:21:04 +01002679 MemoryRegion *root = as->root;
2680
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002681 as->ref_count--;
2682 if (as->ref_count) {
2683 return;
2684 }
Paolo Bonzini374f2982013-05-17 12:37:03 +02002685 /* Flush out anything from MemoryListeners listening in on this */
2686 memory_region_transaction_begin();
2687 as->root = NULL;
2688 memory_region_transaction_commit();
2689 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2690
2691 /* At this point, as->dispatch and as->current_map are dummy
2692 * entries that the guest should never use. Wait for the old
2693 * values to expire before freeing the data.
2694 */
Paolo Bonziniac951902015-02-11 15:21:04 +01002695 as->root = root;
Paolo Bonzini374f2982013-05-17 12:37:03 +02002696 call_rcu(as, do_address_space_destroy, rcu);
2697}
2698
Peter Xu4e831902017-01-16 16:40:04 +08002699static const char *memory_region_type(MemoryRegion *mr)
2700{
2701 if (memory_region_is_ram_device(mr)) {
2702 return "ramd";
2703 } else if (memory_region_is_romd(mr)) {
2704 return "romd";
2705 } else if (memory_region_is_rom(mr)) {
2706 return "rom";
2707 } else if (memory_region_is_ram(mr)) {
2708 return "ram";
2709 } else {
2710 return "i/o";
2711 }
2712}
2713
Blue Swirl314e2982011-09-11 20:22:05 +00002714typedef struct MemoryRegionList MemoryRegionList;
2715
2716struct MemoryRegionList {
2717 const MemoryRegion *mr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002718 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
Blue Swirl314e2982011-09-11 20:22:05 +00002719};
2720
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002721typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
Blue Swirl314e2982011-09-11 20:22:05 +00002722
Peter Xu4e831902017-01-16 16:40:04 +08002723#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2724 int128_sub((size), int128_one())) : 0)
2725#define MTREE_INDENT " "
2726
Blue Swirl314e2982011-09-11 20:22:05 +00002727static void mtree_print_mr(fprintf_function mon_printf, void *f,
2728 const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02002729 hwaddr base,
Jan Kiszka9479c572011-09-27 15:00:41 +02002730 MemoryRegionListHead *alias_print_queue)
Blue Swirl314e2982011-09-11 20:22:05 +00002731{
Jan Kiszka9479c572011-09-27 15:00:41 +02002732 MemoryRegionList *new_ml, *ml, *next_ml;
2733 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00002734 const MemoryRegion *submr;
2735 unsigned int i;
Peter Xub31f8412017-03-14 20:56:27 +08002736 hwaddr cur_start, cur_end;
Blue Swirl314e2982011-09-11 20:22:05 +00002737
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002738 if (!mr) {
Blue Swirl314e2982011-09-11 20:22:05 +00002739 return;
2740 }
2741
2742 for (i = 0; i < level; i++) {
Peter Xu4e831902017-01-16 16:40:04 +08002743 mon_printf(f, MTREE_INDENT);
Blue Swirl314e2982011-09-11 20:22:05 +00002744 }
2745
Peter Xub31f8412017-03-14 20:56:27 +08002746 cur_start = base + mr->addr;
2747 cur_end = cur_start + MR_SIZE(mr->size);
2748
2749 /*
2750      * Try to detect overflow of the memory region.  This should never
2751      * happen under normal circumstances; if it does, print a marker to
2752      * warn whoever is reading the dump.
2753 */
2754 if (cur_start < base || cur_end < cur_start) {
2755 mon_printf(f, "[DETECTED OVERFLOW!] ");
2756 }
2757
Blue Swirl314e2982011-09-11 20:22:05 +00002758 if (mr->alias) {
2759 MemoryRegionList *ml;
2760 bool found = false;
2761
2762 /* check if the alias is already in the queue */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002763 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
Paolo Bonzinif54bb152013-12-11 12:51:46 +01002764 if (ml->mr == mr->alias) {
Blue Swirl314e2982011-09-11 20:22:05 +00002765 found = true;
2766 }
2767 }
2768
2769 if (!found) {
2770 ml = g_new(MemoryRegionList, 1);
2771 ml->mr = mr->alias;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002772 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
Blue Swirl314e2982011-09-11 20:22:05 +00002773 }
Jan Kiszka4896d742012-02-04 16:25:42 +01002774 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
Peter Xu4e831902017-01-16 16:40:04 +08002775 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002776 "-" TARGET_FMT_plx "%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002777 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002778 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002779 memory_region_type((MemoryRegion *)mr),
Peter Crosthwaite3fb18b42014-08-14 23:55:36 -07002780 memory_region_name(mr),
2781 memory_region_name(mr->alias),
Blue Swirl314e2982011-09-11 20:22:05 +00002782 mr->alias_offset,
Peter Xu4e831902017-01-16 16:40:04 +08002783 mr->alias_offset + MR_SIZE(mr->size),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002784 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002785 } else {
Jan Kiszka4896d742012-02-04 16:25:42 +01002786 mon_printf(f,
Peter Xu4e831902017-01-16 16:40:04 +08002787 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002788 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002789 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002790 memory_region_type((MemoryRegion *)mr),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002791 memory_region_name(mr),
2792 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002793 }
Jan Kiszka9479c572011-09-27 15:00:41 +02002794
2795 QTAILQ_INIT(&submr_print_queue);
2796
Blue Swirl314e2982011-09-11 20:22:05 +00002797 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002798 new_ml = g_new(MemoryRegionList, 1);
2799 new_ml->mr = submr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002800 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002801 if (new_ml->mr->addr < ml->mr->addr ||
2802 (new_ml->mr->addr == ml->mr->addr &&
2803 new_ml->mr->priority > ml->mr->priority)) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002804 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002805 new_ml = NULL;
2806 break;
2807 }
2808 }
2809 if (new_ml) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002810 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002811 }
2812 }
2813
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002814 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Peter Xub31f8412017-03-14 20:56:27 +08002815 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
Jan Kiszka9479c572011-09-27 15:00:41 +02002816 alias_print_queue);
2817 }
2818
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002819 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002820 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002821 }
2822}
2823
Peter Xu57bb40c2017-01-16 16:40:05 +08002824static void mtree_print_flatview(fprintf_function p, void *f,
2825 AddressSpace *as)
2826{
2827 FlatView *view = address_space_get_flatview(as);
2828 FlatRange *range = &view->ranges[0];
2829 MemoryRegion *mr;
2830 int n = view->nr;
2831
2832 if (n <= 0) {
2833 p(f, MTREE_INDENT "No rendered FlatView for "
2834 "address space '%s'\n", as->name);
2835 flatview_unref(view);
2836 return;
2837 }
2838
2839 while (n--) {
2840 mr = range->mr;
Paolo Bonzini377a07a2017-03-02 22:49:41 +01002841 if (range->offset_in_region) {
2842 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2843 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
2844 int128_get64(range->addr.start),
2845 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2846 mr->priority,
2847 range->readonly ? "rom" : memory_region_type(mr),
2848 memory_region_name(mr),
2849 range->offset_in_region);
2850 } else {
2851 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2852 TARGET_FMT_plx " (prio %d, %s): %s\n",
2853 int128_get64(range->addr.start),
2854 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2855 mr->priority,
2856 range->readonly ? "rom" : memory_region_type(mr),
2857 memory_region_name(mr));
2858 }
Peter Xu57bb40c2017-01-16 16:40:05 +08002859 range++;
2860 }
2861
2862 flatview_unref(view);
2863}
2864
2865void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
Blue Swirl314e2982011-09-11 20:22:05 +00002866{
2867 MemoryRegionListHead ml_head;
2868 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02002869 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00002870
Peter Xu57bb40c2017-01-16 16:40:05 +08002871 if (flatview) {
2872 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2873 mon_printf(f, "address-space (flat view): %s\n", as->name);
2874 mtree_print_flatview(mon_printf, f, as);
2875 mon_printf(f, "\n");
2876 }
2877 return;
2878 }
2879
Blue Swirl314e2982011-09-11 20:22:05 +00002880 QTAILQ_INIT(&ml_head);
2881
Avi Kivity0d673e32012-10-02 15:28:50 +02002882 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02002883 mon_printf(f, "address-space: %s\n", as->name);
2884 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
2885 mon_printf(f, "\n");
Blue Swirlb9f9be82012-03-10 16:58:35 +00002886 }
2887
Blue Swirl314e2982011-09-11 20:22:05 +00002888 /* print aliased regions */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002889 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02002890 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
2891 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
2892 mon_printf(f, "\n");
Blue Swirl314e2982011-09-11 20:22:05 +00002893 }
2894
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002895 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02002896 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002897 }
Blue Swirl314e2982011-09-11 20:22:05 +00002898}
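
/*
 * Note: this is typically reached from the monitor's "info mtree" command;
 * the flatview argument selects the flat-view dump produced by
 * mtree_print_flatview() above instead of the hierarchical region tree.
 */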
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002899
Peter Maydellb08199c2017-07-07 15:42:51 +01002900void memory_region_init_ram(MemoryRegion *mr,
2901 struct Object *owner,
2902 const char *name,
2903 uint64_t size,
2904 Error **errp)
2905{
2906 DeviceState *owner_dev;
2907 Error *err = NULL;
2908
2909 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
2910 if (err) {
2911 error_propagate(errp, err);
2912 return;
2913 }
2914 /* This will assert if owner is neither NULL nor a DeviceState.
2915 * We only want the owner here for the purposes of defining a
2916 * unique name for migration. TODO: Ideally we should implement
2917 * a naming scheme for Objects which are not DeviceStates, in
2918 * which case we can relax this restriction.
2919 */
2920 owner_dev = DEVICE(owner);
2921 vmstate_register_ram(mr, owner_dev);
2922}
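
/*
 * Hedged usage sketch (illustrative names): from a device's realize method,
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram", 0x4000,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), base_addr, &s->ram);
 *
 * Unlike the _nomigrate variant, this wrapper also calls
 * vmstate_register_ram(), so the RAM contents are migrated under a name
 * derived from the owner's qdev path plus @name.
 */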
2923
2924void memory_region_init_rom(MemoryRegion *mr,
2925 struct Object *owner,
2926 const char *name,
2927 uint64_t size,
2928 Error **errp)
2929{
2930 DeviceState *owner_dev;
2931 Error *err = NULL;
2932
2933 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
2934 if (err) {
2935 error_propagate(errp, err);
2936 return;
2937 }
2938 /* This will assert if owner is neither NULL nor a DeviceState.
2939 * We only want the owner here for the purposes of defining a
2940 * unique name for migration. TODO: Ideally we should implement
2941 * a naming scheme for Objects which are not DeviceStates, in
2942 * which case we can relax this restriction.
2943 */
2944 owner_dev = DEVICE(owner);
2945 vmstate_register_ram(mr, owner_dev);
2946}
2947
2948void memory_region_init_rom_device(MemoryRegion *mr,
2949 struct Object *owner,
2950 const MemoryRegionOps *ops,
2951 void *opaque,
2952 const char *name,
2953 uint64_t size,
2954 Error **errp)
2955{
2956 DeviceState *owner_dev;
2957 Error *err = NULL;
2958
2959 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
2960 name, size, &err);
2961 if (err) {
2962 error_propagate(errp, err);
2963 return;
2964 }
2965 /* This will assert if owner is neither NULL nor a DeviceState.
2966 * We only want the owner here for the purposes of defining a
2967 * unique name for migration. TODO: Ideally we should implement
2968 * a naming scheme for Objects which are not DeviceStates, in
2969 * which case we can relax this restriction.
2970 */
2971 owner_dev = DEVICE(owner);
2972 vmstate_register_ram(mr, owner_dev);
2973}
2974
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002975static const TypeInfo memory_region_info = {
2976 .parent = TYPE_OBJECT,
2977 .name = TYPE_MEMORY_REGION,
2978 .instance_size = sizeof(MemoryRegion),
2979 .instance_init = memory_region_initfn,
2980 .instance_finalize = memory_region_finalize,
2981};
2982
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002983static const TypeInfo iommu_memory_region_info = {
2984 .parent = TYPE_MEMORY_REGION,
2985 .name = TYPE_IOMMU_MEMORY_REGION,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10002986 .class_size = sizeof(IOMMUMemoryRegionClass),
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002987 .instance_size = sizeof(IOMMUMemoryRegion),
2988 .instance_init = iommu_memory_region_initfn,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10002989 .abstract = true,
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002990};
2991
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002992static void memory_register_types(void)
2993{
2994 type_register_static(&memory_region_info);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002995 type_register_static(&iommu_memory_region_info);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002996}
2997
2998type_init(memory_register_types)