/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}
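
/*
 * Two ranges overlap exactly when one of them contains the other's start.
 * addrrange_intersection() assumes the caller has already checked
 * addrrange_intersects(); on disjoint ranges it would compute a
 * negative size.
 */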
static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
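
/*
 * Direction in which to walk the registered memory listeners: Forward is
 * list (ascending priority) order, Reverse the opposite. Paired callbacks
 * (e.g. region_add/region_del) are delivered in opposite directions so
 * listener nesting is preserved.
 */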
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
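
/*
 * Total order on ioeventfds: by range start, then range size, then
 * match_data/data, and finally by the notifier itself. The
 * symmetric-difference walk in address_space_add_del_ioeventfds()
 * assumes both fd lists are sorted by this predicate.
 */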
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
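
/*
 * Convert a FlatRange, together with the FlatView that owns it, into the
 * MemoryRegionSection shape that memory listeners consume.
 */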
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);

    return view;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        call_rcu(view, flatview_destroy, rcu);
    }
}

FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}
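
/*
 * Two FlatRanges can be merged when they are adjacent in guest physical
 * address space, map the same region at consecutive offsets, and agree
 * on all attributes (dirty logging, ROM-device mode, read-only).
 */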
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
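
/*
 * Translate a region-relative offset into an absolute guest address by
 * walking up the container chain and accumulating each level's offset.
 * Only used to make the trace points below easier to correlate with
 * guest physical addresses.
 */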
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
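
/*
 * Each accessor below performs one naturally sized piece of a possibly
 * larger access; access_with_adjusted_size() shifts and masks the piece
 * into *value. The tracing distinguishes subpage accesses, accesses to
 * io_mem_notdirty (code previously translated into a TB), and ordinary
 * MMIO.
 */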
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
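
/*
 * Carry out an access as a series of device-sized pieces: clamp the
 * requested size into [access_size_min, access_size_max], then call
 * access_fn once per piece, ordering the pieces to match the region's
 * endianness and OR-ing the MemTxResults together.
 */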
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
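
/*
 * Find the region that should anchor the FlatView: follow aliases that
 * cover their target completely at offset 0, since rendering from the
 * target produces an identical view and lets more address spaces share
 * one FlatView.
 */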
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->alias && !mr->alias_offset &&
           int128_ge(mr->size, mr->alias->size)) {
        /* The alias is included in its entirety. Use it as
         * the "real" root, so that we can share more FlatViews.
         */
        mr = mr->alias;
    }

    return mr;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
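
/*
 * Like address_space_to_flatview(), but also takes a reference. The loop
 * copes with concurrent commits: if the last reference to the view we
 * just read is dropped before we manage to take ours, re-read
 * as->current_map and try again.
 */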
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
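
/*
 * Rebuild the FlatView for @as: render and simplify the topology, build
 * the dispatch tree, run a delete pass and then an add pass against the
 * old view for the listeners, and finally publish the new view via RCU.
 */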
static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(old_view->root);
    FlatView *new_view = generate_memory_topology(physmr);
    int i;

    new_view->dispatch = address_space_dispatch_new(new_view);
    for (i = 0; i < new_view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&new_view->ranges[i], new_view);
        flatview_add_to_dispatch(new_view, &mrs);
    }
    address_space_dispatch_compact(new_view->dispatch);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        address_space_update_topology_pass(as, old_view, new_view, false);
        address_space_update_topology_pass(as, old_view, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    /* Drop the reference that as->current_map held on the old view... */
    flatview_unref(old_view);

    /* Note that all the old MemoryRegions are still alive up to this
     * point. This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    /* ...and the reference taken by address_space_get_flatview() above. */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}
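
/*
 * Transactions batch topology changes: while the depth is non-zero,
 * updates only record themselves in memory_region_update_pending or
 * ioeventfd_update_pending, and the outermost commit rebuilds the
 * FlatViews and notifies listeners once. A caller making several related
 * changes might wrap them like this (sysmem, base and the regions below
 * are illustrative placeholders, not names from this file):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(old_mr, false);
 *     memory_region_add_subregion(sysmem, base, new_mr);
 *     memory_region_transaction_commit();
 */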
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}
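
/*
 * Region names become QOM child property names, so characters that are
 * special in QOM paths are escaped as a four-byte \xNN sequence.
 */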
static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}
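
/*
 * Fallback ops for addresses with no region behind them: reads return 0,
 * writes are discarded, and when a CPU is on the stack the access is also
 * reported via cpu_unassigned_access() so targets can raise an exception.
 */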
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
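
/*
 * ram_device regions are backed by host memory (mr->ram_block->host) but
 * must be touched with discrete, naturally sized loads and stores rather
 * than memcpy(), e.g. for memory mapped from a vfio device.
 */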
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}
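
/*
 * Dispatch a read to whichever callback family the region implements:
 * ->read, ->read_with_attrs, or the legacy old_mmio table.
 */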
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
Peter Maydell3b643492015-04-26 16:49:23 +01001371MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1372 hwaddr addr,
1373 uint64_t data,
1374 unsigned size,
1375 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001376{
Avi Kivity897fa7c2011-11-13 13:05:27 +02001377 if (!memory_region_access_valid(mr, addr, size, true)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001378 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001379 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001380 }
1381
Avi Kivitya621f382012-01-02 13:12:08 +02001382 adjust_endianness(mr, &data, size);
1383
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001384 if ((!kvm_eventfds_enabled()) &&
1385 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1386 return MEMTX_OK;
1387 }
1388
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001389 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001390 return access_with_adjusted_size(addr, &data, size,
1391 mr->ops->impl.min_access_size,
1392 mr->ops->impl.max_access_size,
1393 memory_region_write_accessor, mr,
1394 attrs);
1395 } else if (mr->ops->write_with_attrs) {
1396 return
1397 access_with_adjusted_size(addr, &data, size,
1398 mr->ops->impl.min_access_size,
1399 mr->ops->impl.max_access_size,
1400 memory_region_write_with_attrs_accessor,
1401 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001402 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001403 return access_with_adjusted_size(addr, &data, size, 1, 4,
1404 memory_region_oldmmio_write_accessor,
1405 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001406 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001407}
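/*
 * Note on the ordering above: the access is validated first, then
 * byte swapped into the device's endianness, and only then matched
 * against ioeventfds; when in-kernel (KVM) eventfd matching is not
 * available, the memory core performs that matching itself via
 * memory_region_dispatch_write_eventfds().
 */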
1408
Avi Kivity093bc2c2011-07-26 14:26:01 +03001409void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001410 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001411 const MemoryRegionOps *ops,
1412 void *opaque,
1413 const char *name,
1414 uint64_t size)
1415{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001416 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001417 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001418 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001419 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001420}
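/*
 * Typical usage sketch (device state, ops and addresses are
 * assumptions, not from this file): a device registers its MMIO
 * window by initializing an I/O region and mapping it into a
 * container such as the system memory region.
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                           "my-dev-mmio", 0x1000);
 *     memory_region_add_subregion(sysmem, 0xfe000000, &s->iomem);
 */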
1421
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001422void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1423 Object *owner,
1424 const char *name,
1425 uint64_t size,
1426 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001427{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001428 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001429 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001430 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001431 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001432 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001433 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001434}
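/*
 * The _nomigrate suffix signals that the contents of the region are
 * not registered with the migration framework; callers that need the
 * RAM migrated are expected to arrange for that themselves, for
 * instance via the vmstate RAM helpers from migration/vmstate.h.
 */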
1435
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001436void memory_region_init_resizeable_ram(MemoryRegion *mr,
1437 Object *owner,
1438 const char *name,
1439 uint64_t size,
1440 uint64_t max_size,
1441 void (*resized)(const char*,
1442 uint64_t length,
1443 void *host),
1444 Error **errp)
1445{
1446 memory_region_init(mr, owner, name, size);
1447 mr->ram = true;
1448 mr->terminates = true;
1449 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001450 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1451 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001452 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001453}
1454
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001455#ifdef __linux__
1456void memory_region_init_ram_from_file(MemoryRegion *mr,
1457 struct Object *owner,
1458 const char *name,
1459 uint64_t size,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001460 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001461 const char *path,
1462 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001463{
1464 memory_region_init(mr, owner, name, size);
1465 mr->ram = true;
1466 mr->terminates = true;
1467 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001468 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001469 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001470}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001471
1472void memory_region_init_ram_from_fd(MemoryRegion *mr,
1473 struct Object *owner,
1474 const char *name,
1475 uint64_t size,
1476 bool share,
1477 int fd,
1478 Error **errp)
1479{
1480 memory_region_init(mr, owner, name, size);
1481 mr->ram = true;
1482 mr->terminates = true;
1483 mr->destructor = memory_region_destructor_ram;
1484 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1485 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1486}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001487#endif
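/*
 * Usage sketch for the file-backed variant (Linux only; the names,
 * sizes and path are assumptions).  Backing guest RAM with hugetlbfs
 * looks roughly like:
 *
 *     memory_region_init_ram_from_file(mr, OBJECT(dev), "pc.ram",
 *                                      ram_size, true,
 *                                      "/dev/hugepages/guest.ram",
 *                                      &error_fatal);
 */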
1488
Avi Kivity093bc2c2011-07-26 14:26:01 +03001489void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001490 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001491 const char *name,
1492 uint64_t size,
1493 void *ptr)
1494{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001495 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001496 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001497 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001498 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001499 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001500
1501 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1502 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001503 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001504}
1505
Alex Williamson21e00fa2016-10-31 09:53:03 -06001506void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1507 Object *owner,
1508 const char *name,
1509 uint64_t size,
1510 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301511{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001512 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1513 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001514 mr->ops = &ram_device_mem_ops;
1515 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301516}
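/*
 * Sketch of the intended use (a hypothetical VFIO-style caller): a
 * device BAR that has been mmap()ed into the QEMU process can be
 * exposed to the guest as a RAM-device region so that all accesses
 * go through the safe accessors above.
 *
 *     memory_region_init_ram_device_ptr(&bar->region, OBJECT(vdev),
 *                                       "vfio-bar", bar->size,
 *                                       bar->mmap);
 */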
1517
Avi Kivity093bc2c2011-07-26 14:26:01 +03001518void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001519 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001520 const char *name,
1521 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001522 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001523 uint64_t size)
1524{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001525 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001526 mr->alias = orig;
1527 mr->alias_offset = offset;
1528}
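/*
 * Alias sketch (names and addresses are assumptions): an alias makes
 * a slice of one region appear at another guest-physical address,
 * e.g. mirroring part of RAM the way PC chipsets alias low memory.
 *
 *     memory_region_init_alias(&s->ram_alias, OBJECT(s), "ram-low",
 *                              ram, 0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0xffe00000, &s->ram_alias);
 */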
1529
Peter Maydellb59821a2017-07-07 15:42:50 +01001530void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1531 struct Object *owner,
1532 const char *name,
1533 uint64_t size,
1534 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001535{
1536 memory_region_init(mr, owner, name, size);
1537 mr->ram = true;
1538 mr->readonly = true;
1539 mr->terminates = true;
1540 mr->destructor = memory_region_destructor_ram;
1541 mr->ram_block = qemu_ram_alloc(size, mr, errp);
1542 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1543}
1544
Peter Maydellb59821a2017-07-07 15:42:50 +01001545void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1546 Object *owner,
1547 const MemoryRegionOps *ops,
1548 void *opaque,
1549 const char *name,
1550 uint64_t size,
1551 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001552{
Peter Maydell39e0b032016-07-04 13:06:35 +01001553 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001554 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001555 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001556 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001557 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001558 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001559 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001560 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001561}
1562
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001563void memory_region_init_iommu(void *_iommu_mr,
1564 size_t instance_size,
1565 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001566 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001567 const char *name,
1568 uint64_t size)
1569{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001570 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001571 struct MemoryRegion *mr;
1572
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001573 object_initialize(_iommu_mr, instance_size, mrtypename);
1574 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001575 memory_region_do_init(mr, owner, name, size);
1576 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001577 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001578 QLIST_INIT(&iommu_mr->iommu_notify);
1579 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001580}
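/*
 * Unlike the other initializers, IOMMU regions are QOM objects with
 * a class of their own; a caller passes the concrete type name and
 * the size of its instance struct.  TYPE_MY_IOMMU_MEMORY_REGION is
 * an assumption for the sketch:
 *
 *     memory_region_init_iommu(&s->iommu_mr, sizeof(s->iommu_mr),
 *                              TYPE_MY_IOMMU_MEMORY_REGION,
 *                              OBJECT(s), "my-iommu", UINT64_MAX);
 */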
1581
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001582static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001583{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001584 MemoryRegion *mr = MEMORY_REGION(obj);
1585
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001586 assert(!mr->container);
1587
 1588 /* We know the region is not visible in any address space (it
 1589 * does not have a container, and it cannot be a root either
 1590 * because it has no references), so we can blindly clear
 1591 * mr->enabled.  Calling memory_region_set_enabled() instead
 1592 * could trigger a transaction and cause an infinite loop.
 1593 */
1594 mr->enabled = false;
1595 memory_region_transaction_begin();
1596 while (!QTAILQ_EMPTY(&mr->subregions)) {
1597 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1598 memory_region_del_subregion(mr, subregion);
1599 }
1600 memory_region_transaction_commit();
1601
Avi Kivity545e92e2011-08-08 19:58:48 +03001602 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001603 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001604 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001605 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001606}
1607
Paolo Bonzini803c0812013-05-07 06:59:09 +02001608Object *memory_region_owner(MemoryRegion *mr)
1609{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001610 Object *obj = OBJECT(mr);
1611 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001612}
1613
Paolo Bonzini46637be2013-05-07 09:06:00 +02001614void memory_region_ref(MemoryRegion *mr)
1615{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001616 /* MMIO callbacks most likely will access data that belongs
1617 * to the owner, hence the need to ref/unref the owner whenever
1618 * the memory region is in use.
1619 *
1620 * The memory region is a child of its owner. As long as the
1621 * owner doesn't call unparent itself on the memory region,
1622 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001623 * Memory regions without an owner are assumed never to go away;
 1624 * we do not ref/unref them because doing so would slow down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001625 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001626 if (mr && mr->owner) {
1627 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001628 }
1629}
1630
1631void memory_region_unref(MemoryRegion *mr)
1632{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001633 if (mr && mr->owner) {
1634 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001635 }
1636}
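/*
 * Sketch of the pattern described above (hypothetical caller): code
 * that holds on to a MemoryRegion pointer across MMIO dispatch takes
 * a reference so the owner cannot disappear underneath it.
 *
 *     memory_region_ref(mr);
 *     ... dispatch accesses that may touch mr->opaque ...
 *     memory_region_unref(mr);
 */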
1637
Avi Kivity093bc2c2011-07-26 14:26:01 +03001638uint64_t memory_region_size(MemoryRegion *mr)
1639{
Avi Kivity08dafab2011-10-16 13:19:17 +02001640 if (int128_eq(mr->size, int128_2_64())) {
1641 return UINT64_MAX;
1642 }
1643 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001644}
1645
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001646const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001647{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001648 if (!mr->name) {
1649 ((MemoryRegion *)mr)->name =
1650 object_get_canonical_path_component(OBJECT(mr));
1651 }
Peter Maydell302fa282014-08-19 20:05:46 +01001652 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001653}
1654
Alex Williamson21e00fa2016-10-31 09:53:03 -06001655bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301656{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001657 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301658}
1659
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001660uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001661{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001662 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001663 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001664 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1665 }
1666 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001667}
1668
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001669bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1670{
1671 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1672}
1673
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001674static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001675{
1676 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1677 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001678 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001679
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001680 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001681 flags |= iommu_notifier->notifier_flags;
1682 }
1683
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001684 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1685 imrc->notify_flag_changed(iommu_mr,
1686 iommu_mr->iommu_notify_flags,
1687 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001688 }
1689
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001690 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001691}
1692
Peter Xucdb30812016-09-23 13:02:26 +08001693void memory_region_register_iommu_notifier(MemoryRegion *mr,
1694 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001695{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001696 IOMMUMemoryRegion *iommu_mr;
1697
Jason Wangefcd38c2016-12-30 18:09:17 +08001698 if (mr->alias) {
1699 memory_region_register_iommu_notifier(mr->alias, n);
1700 return;
1701 }
1702
Peter Xucdb30812016-09-23 13:02:26 +08001703 /* We need to register for at least one notifier flag bit */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001704 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001705 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001706 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001707 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1708 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001709}
1710
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001711uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001712{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001713 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1714
1715 if (imrc->get_min_page_size) {
1716 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001717 }
1718 return TARGET_PAGE_SIZE;
1719}
1720
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001721void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001722{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001723 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001724 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001725 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001726 IOMMUTLBEntry iotlb;
1727
Peter Xufaa362e2017-04-07 18:59:11 +08001728 /* If the IOMMU has its own replay callback, use it instead */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001729 if (imrc->replay) {
1730 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001731 return;
1732 }
1733
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001734 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001735
David Gibsona788f222015-09-30 12:13:55 +10001736 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001737 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001738 if (iotlb.perm != IOMMU_NONE) {
1739 n->notify(n, &iotlb);
1740 }
1741
 1742 /* If (2^64 - MR size) < granularity, it's possible to get an
 1743 * infinite loop here; this check catches such a wraparound. */
1744 if ((addr + granularity) < addr) {
1745 break;
1746 }
1747 }
1748}
1749
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001750void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001751{
1752 IOMMUNotifier *notifier;
1753
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001754 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1755 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001756 }
1757}
1758
Peter Xucdb30812016-09-23 13:02:26 +08001759void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1760 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001761{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001762 IOMMUMemoryRegion *iommu_mr;
1763
Jason Wangefcd38c2016-12-30 18:09:17 +08001764 if (mr->alias) {
1765 memory_region_unregister_iommu_notifier(mr->alias, n);
1766 return;
1767 }
Peter Xucdb30812016-09-23 13:02:26 +08001768 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001769 iommu_mr = IOMMU_MEMORY_REGION(mr);
1770 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001771}
1772
Peter Xubd2bfa42017-04-07 18:59:10 +08001773void memory_region_notify_one(IOMMUNotifier *notifier,
1774 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001775{
Peter Xucdb30812016-09-23 13:02:26 +08001776 IOMMUNotifierFlag request_flags;
1777
Peter Xubd2bfa42017-04-07 18:59:10 +08001778 /*
 1779 * Skip the notification if the notified range does not overlap
 1780 * with the registered range.
1781 */
1782 if (notifier->start > entry->iova + entry->addr_mask + 1 ||
1783 notifier->end < entry->iova) {
1784 return;
1785 }
Peter Xucdb30812016-09-23 13:02:26 +08001786
Peter Xubd2bfa42017-04-07 18:59:10 +08001787 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001788 request_flags = IOMMU_NOTIFIER_MAP;
1789 } else {
1790 request_flags = IOMMU_NOTIFIER_UNMAP;
1791 }
1792
Peter Xubd2bfa42017-04-07 18:59:10 +08001793 if (notifier->notifier_flags & request_flags) {
1794 notifier->notify(notifier, entry);
1795 }
1796}
1797
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001798void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001799 IOMMUTLBEntry entry)
1800{
1801 IOMMUNotifier *iommu_notifier;
1802
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001803 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001804
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001805 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001806 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001807 }
David Gibson06866572013-05-14 19:13:56 +10001808}
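/*
 * Illustrative sketch (field values are assumptions): an IOMMU
 * implementation invalidating one mapping would build an UNMAP entry
 * and let memory_region_notify_iommu() fan it out to the registered
 * notifiers:
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~mask,
 *         .translated_addr = 0,
 *         .addr_mask = mask,
 *         .perm = IOMMU_NONE,
 *     };
 *     memory_region_notify_iommu(iommu_mr, entry);
 */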
1809
Avi Kivity093bc2c2011-07-26 14:26:01 +03001810void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1811{
Avi Kivity5a583342011-07-26 14:26:02 +03001812 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001813 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001814
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001815 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001816 old_logging = mr->vga_logging_count;
1817 mr->vga_logging_count += log ? 1 : -1;
1818 if (!!old_logging == !!mr->vga_logging_count) {
1819 return;
1820 }
1821
Jan Kiszka59023ef2012-08-23 13:02:30 +02001822 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001823 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001824 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001825 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001826}
1827
Avi Kivitya8170e52012-10-23 12:30:10 +02001828bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1829 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001830{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001831 assert(mr->ram_block);
1832 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1833 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001834}
1835
Avi Kivitya8170e52012-10-23 12:30:10 +02001836void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1837 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001838{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001839 assert(mr->ram_block);
1840 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1841 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001842 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001843}
1844
Juan Quintela6c279db2012-10-17 20:24:28 +02001845bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1846 hwaddr size, unsigned client)
1847{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001848 assert(mr->ram_block);
1849 return cpu_physical_memory_test_and_clear_dirty(
1850 memory_region_get_ram_addr(mr) + addr, size, client);
Juan Quintela6c279db2012-10-17 20:24:28 +02001851}
1852
Gerd Hoffmann8deaf122017-04-21 11:16:25 +02001853DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1854 hwaddr addr,
1855 hwaddr size,
1856 unsigned client)
1857{
1858 assert(mr->ram_block);
1859 return cpu_physical_memory_snapshot_and_clear_dirty(
1860 memory_region_get_ram_addr(mr) + addr, size, client);
1861}
1862
1863bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1864 hwaddr addr, hwaddr size)
1865{
1866 assert(mr->ram_block);
1867 return cpu_physical_memory_snapshot_get_dirty(snap,
1868 memory_region_get_ram_addr(mr) + addr, size);
1869}
Juan Quintela6c279db2012-10-17 20:24:28 +02001870
Avi Kivity093bc2c2011-07-26 14:26:01 +03001871void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1872{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001873 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001874 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001875 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001876 FlatRange *fr;
1877
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001878 /* If the same address space has multiple log_sync listeners, we
1879 * visit that address space's FlatView multiple times. But because
1880 * log_sync listeners are rare, it's still cheaper than walking each
1881 * address space once.
1882 */
1883 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1884 if (!listener->log_sync) {
1885 continue;
1886 }
1887 as = listener->address_space;
1888 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001889 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity0d673e32012-10-02 15:28:50 +02001890 if (fr->mr == mr) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10001891 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001892 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02001893 }
Avi Kivity5a583342011-07-26 14:26:02 +03001894 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001895 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03001896 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001897}
1898
1899void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1900{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001901 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001902 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001903 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01001904 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001905 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001906 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001907}
1908
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001909void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001910{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001911 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001912 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001913 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01001914 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001915 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001916 }
1917}
1918
Avi Kivitya8170e52012-10-23 12:30:10 +02001919void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1920 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001921{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001922 assert(mr->ram_block);
1923 cpu_physical_memory_test_and_clear_dirty(
1924 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001925}
1926
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001927int memory_region_get_fd(MemoryRegion *mr)
1928{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001929 int fd;
1930
1931 rcu_read_lock();
1932 while (mr->alias) {
1933 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001934 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001935 fd = mr->ram_block->fd;
1936 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001937
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001938 return fd;
1939}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001940
Avi Kivity093bc2c2011-07-26 14:26:01 +03001941void *memory_region_get_ram_ptr(MemoryRegion *mr)
1942{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001943 void *ptr;
1944 uint64_t offset = 0;
1945
1946 rcu_read_lock();
1947 while (mr->alias) {
1948 offset += mr->alias_offset;
1949 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001950 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08001951 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001952 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001953 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001954
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001955 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001956}
1957
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001958MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
1959{
1960 RAMBlock *block;
1961
1962 block = qemu_ram_block_from_host(ptr, false, offset);
1963 if (!block) {
1964 return NULL;
1965 }
1966
1967 return block->mr;
1968}
1969
Fam Zheng7ebb2742016-03-01 14:18:20 +08001970ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
1971{
1972 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
1973}
1974
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001975void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
1976{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001977 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001978
Gongleifa53a0e2016-05-10 10:04:59 +08001979 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001980}
1981
Avi Kivity0d673e32012-10-02 15:28:50 +02001982static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001983{
Paolo Bonzini99e86342013-05-06 10:26:13 +02001984 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001985 FlatRange *fr;
1986 CoalescedMemoryRange *cmr;
1987 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02001988 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001989
Paolo Bonzini856d7242013-05-06 11:57:21 +02001990 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001991 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001992 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02001993 section = (MemoryRegionSection) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10001994 .fv = view,
Avi Kivity95d29942012-10-02 18:21:54 +02001995 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001996 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02001997 };
1998
Paolo Bonzini9a546352016-09-22 16:23:06 +02001999 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002000 int128_get64(fr->addr.start),
2001 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002002 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2003 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02002004 int128_sub(fr->addr.start,
2005 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002006 if (!addrrange_intersects(tmp, fr->addr)) {
2007 continue;
2008 }
2009 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002010 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002011 int128_get64(tmp.start),
2012 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002013 }
2014 }
2015 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002016 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002017}
2018
Avi Kivity0d673e32012-10-02 15:28:50 +02002019static void memory_region_update_coalesced_range(MemoryRegion *mr)
2020{
2021 AddressSpace *as;
2022
2023 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2024 memory_region_update_coalesced_range_as(mr, as);
2025 }
2026}
2027
Avi Kivity093bc2c2011-07-26 14:26:01 +03002028void memory_region_set_coalescing(MemoryRegion *mr)
2029{
2030 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002031 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002032}
2033
2034void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002035 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002036 uint64_t size)
2037{
Anthony Liguori7267c092011-08-20 22:09:37 -05002038 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002039
Avi Kivity08dafab2011-10-16 13:19:17 +02002040 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002041 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2042 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02002043 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002044}
2045
2046void memory_region_clear_coalescing(MemoryRegion *mr)
2047{
2048 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002049 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002050
Jan Kiszkad4105152012-08-23 13:02:29 +02002051 qemu_flush_coalesced_mmio_buffer();
2052 mr->flush_coalesced_mmio = false;
2053
Avi Kivity093bc2c2011-07-26 14:26:01 +03002054 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2055 cmr = QTAILQ_FIRST(&mr->coalesced);
2056 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002057 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002058 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002059 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002060
2061 if (updated) {
2062 memory_region_update_coalesced_range(mr);
2063 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002064}
2065
Jan Kiszkad4105152012-08-23 13:02:29 +02002066void memory_region_set_flush_coalesced(MemoryRegion *mr)
2067{
2068 mr->flush_coalesced_mmio = true;
2069}
2070
2071void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2072{
2073 qemu_flush_coalesced_mmio_buffer();
2074 if (QTAILQ_EMPTY(&mr->coalesced)) {
2075 mr->flush_coalesced_mmio = false;
2076 }
2077}
2078
Jan Kiszka196ea132015-06-18 18:47:20 +02002079void memory_region_set_global_locking(MemoryRegion *mr)
2080{
2081 mr->global_locking = true;
2082}
2083
2084void memory_region_clear_global_locking(MemoryRegion *mr)
2085{
2086 mr->global_locking = false;
2087}
2088
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002089static bool userspace_eventfd_warning;
2090
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002091void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002092 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002093 unsigned size,
2094 bool match_data,
2095 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002096 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002097{
2098 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002099 .addr.start = int128_make64(addr),
2100 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002101 .match_data = match_data,
2102 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002103 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002104 };
2105 unsigned i;
2106
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002107 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2108 userspace_eventfd_warning))) {
2109 userspace_eventfd_warning = true;
2110 error_report("Using eventfd without MMIO binding in KVM. "
2111 "Suboptimal performance expected");
2112 }
2113
Jason Wangb8aecea2015-11-06 16:02:45 +08002114 if (size) {
2115 adjust_endianness(mr, &mrfd.data, size);
2116 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002117 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002118 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2119 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2120 break;
2121 }
2122 }
2123 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002124 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002125 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2126 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2127 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2128 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002129 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002130 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002131}
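/*
 * Usage sketch (the doorbell offset and data value are assumptions):
 * a virtio-style device can have the guest's write to a doorbell
 * register signal an EventNotifier directly, bypassing the MMIO
 * dispatch path.
 *
 *     event_notifier_init(&s->notifier, 0);
 *     memory_region_add_eventfd(&s->iomem, 0x10, 2, true, vq_index,
 *                               &s->notifier);
 */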
2132
2133void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002134 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002135 unsigned size,
2136 bool match_data,
2137 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002138 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002139{
2140 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002141 .addr.start = int128_make64(addr),
2142 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002143 .match_data = match_data,
2144 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002145 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002146 };
2147 unsigned i;
2148
Jason Wangb8aecea2015-11-06 16:02:45 +08002149 if (size) {
2150 adjust_endianness(mr, &mrfd.data, size);
2151 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002152 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002153 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2154 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2155 break;
2156 }
2157 }
2158 assert(i != mr->ioeventfd_nb);
2159 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2160 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2161 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002162 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002163 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002164 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002165 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002166}
2167
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002168static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002169{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002170 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002171 MemoryRegion *other;
2172
Jan Kiszka59023ef2012-08-23 13:02:30 +02002173 memory_region_transaction_begin();
2174
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002175 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002176 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002177 if (subregion->priority >= other->priority) {
2178 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2179 goto done;
2180 }
2181 }
2182 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2183done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002184 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002185 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002186}
2187
Peter Crosthwaite05987012014-06-05 23:14:44 -07002188static void memory_region_add_subregion_common(MemoryRegion *mr,
2189 hwaddr offset,
2190 MemoryRegion *subregion)
2191{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002192 assert(!subregion->container);
2193 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002194 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002195 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002196}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002197
2198void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002199 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002200 MemoryRegion *subregion)
2201{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002202 subregion->priority = 0;
2203 memory_region_add_subregion_common(mr, offset, subregion);
2204}
2205
2206void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002207 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002208 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002209 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002210{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002211 subregion->priority = priority;
2212 memory_region_add_subregion_common(mr, offset, subregion);
2213}
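/*
 * Overlap sketch (names and addresses are assumptions): when two
 * subregions claim the same guest-physical range, the one with the
 * higher priority wins in the flattened view, so a ROM can shadow
 * the RAM underneath it:
 *
 *     memory_region_add_subregion(sysmem, 0, ram);
 *     memory_region_add_subregion_overlap(sysmem, 0xfffc0000, rom, 1);
 */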
2214
2215void memory_region_del_subregion(MemoryRegion *mr,
2216 MemoryRegion *subregion)
2217{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002218 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002219 assert(subregion->container == mr);
2220 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002221 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002222 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002223 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002224 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002225}
2226
2227void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2228{
2229 if (enabled == mr->enabled) {
2230 return;
2231 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002232 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002233 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002234 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002235 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002236}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002237
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002238void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2239{
2240 Int128 s = int128_make64(size);
2241
2242 if (size == UINT64_MAX) {
2243 s = int128_2_64();
2244 }
2245 if (int128_eq(s, mr->size)) {
2246 return;
2247 }
2248 memory_region_transaction_begin();
2249 mr->size = s;
2250 memory_region_update_pending = true;
2251 memory_region_transaction_commit();
2252}
2253
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002254static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002255{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002256 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002257
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002258 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002259 memory_region_transaction_begin();
2260 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002261 memory_region_del_subregion(container, mr);
2262 mr->container = container;
2263 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002264 memory_region_unref(mr);
2265 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002266 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002267}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002268
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002269void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2270{
2271 if (addr != mr->addr) {
2272 mr->addr = addr;
2273 memory_region_readd_subregion(mr);
2274 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002275}
2276
Avi Kivitya8170e52012-10-23 12:30:10 +02002277void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002278{
Avi Kivity47033592011-12-04 19:16:50 +02002279 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002280
Jan Kiszka59023ef2012-08-23 13:02:30 +02002281 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002282 return;
2283 }
2284
Jan Kiszka59023ef2012-08-23 13:02:30 +02002285 memory_region_transaction_begin();
2286 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002287 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002288 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002289}
2290
Igor Mammedova2b257d2014-10-31 16:38:37 +00002291uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2292{
2293 return mr->align;
2294}
2295
Avi Kivitye2177952011-12-08 15:00:18 +02002296static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2297{
2298 const AddrRange *addr = addr_;
2299 const FlatRange *fr = fr_;
2300
2301 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2302 return -1;
2303 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2304 return 1;
2305 }
2306 return 0;
2307}
2308
Paolo Bonzini99e86342013-05-06 10:26:13 +02002309static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002310{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002311 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002312 sizeof(FlatRange), cmp_flatrange_addr);
2313}
2314
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002315bool memory_region_is_mapped(MemoryRegion *mr)
2316{
2317 return mr->container ? true : false;
2318}
2319
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002320/* Same as memory_region_find, but it does not add a reference to the
2321 * returned region. It must be called from an RCU critical section.
2322 */
2323static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2324 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002325{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002326 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002327 MemoryRegion *root;
2328 AddressSpace *as;
2329 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002330 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002331 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002332
Paolo Bonzini73034e92013-05-07 15:48:28 +02002333 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002334 for (root = mr; root->container; ) {
2335 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002336 addr += root->addr;
2337 }
2338
2339 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002340 if (!as) {
2341 return ret;
2342 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002343 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002344
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002345 view = address_space_to_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002346 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002347 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002348 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002349 }
2350
Paolo Bonzini99e86342013-05-06 10:26:13 +02002351 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002352 --fr;
2353 }
2354
2355 ret.mr = fr->mr;
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002356 ret.fv = view;
Avi Kivitye2177952011-12-08 15:00:18 +02002357 range = addrrange_intersection(range, fr->addr);
2358 ret.offset_within_region = fr->offset_in_region;
2359 ret.offset_within_region += int128_get64(int128_sub(range.start,
2360 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002361 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002362 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002363 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002364 return ret;
2365}
2366
2367MemoryRegionSection memory_region_find(MemoryRegion *mr,
2368 hwaddr addr, uint64_t size)
2369{
2370 MemoryRegionSection ret;
2371 rcu_read_lock();
2372 ret = memory_region_find_rcu(mr, addr, size);
2373 if (ret.mr) {
2374 memory_region_ref(ret.mr);
2375 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002376 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002377 return ret;
2378}
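/*
 * Caller sketch: memory_region_find() returns a referenced region,
 * so the reference must be dropped once the section is done with.
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *     if (sec.mr) {
 *         ... inspect sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */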
2379
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002380bool memory_region_present(MemoryRegion *container, hwaddr addr)
2381{
2382 MemoryRegion *mr;
2383
2384 rcu_read_lock();
2385 mr = memory_region_find_rcu(container, addr, 1).mr;
2386 rcu_read_unlock();
2387 return mr && mr != container;
2388}
2389
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002390void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002391{
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002392 MemoryListener *listener;
2393 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002394 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002395 FlatRange *fr;
2396
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002397 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2398 if (!listener->log_sync) {
2399 continue;
2400 }
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002401 as = listener->address_space;
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002402 view = address_space_get_flatview(as);
2403 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonziniadaad612016-09-22 16:09:08 +02002404 if (fr->dirty_log_mask) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002405 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2406
Paolo Bonziniadaad612016-09-22 16:09:08 +02002407 listener->log_sync(listener, &mrs);
2408 }
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002409 }
2410 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002411 }
2412}
2413
Jay Zhou19310762017-07-28 18:28:53 +08002414static VMChangeStateEntry *vmstate_change;
2415
Avi Kivity7664e802011-12-11 14:47:25 +02002416void memory_global_dirty_log_start(void)
2417{
Jay Zhou19310762017-07-28 18:28:53 +08002418 if (vmstate_change) {
2419 qemu_del_vm_change_state_handler(vmstate_change);
2420 vmstate_change = NULL;
2421 }
2422
Avi Kivity7664e802011-12-11 14:47:25 +02002423 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002424
Avi Kivity7376e582012-02-08 21:05:17 +02002425 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002426
2427 /* Refresh DIRTY_LOG_MIGRATION bit. */
2428 memory_region_transaction_begin();
2429 memory_region_update_pending = true;
2430 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002431}
2432
Jay Zhou19310762017-07-28 18:28:53 +08002433static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002434{
Avi Kivity7664e802011-12-11 14:47:25 +02002435 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002436
2437 /* Refresh DIRTY_LOG_MIGRATION bit. */
2438 memory_region_transaction_begin();
2439 memory_region_update_pending = true;
2440 memory_region_transaction_commit();
2441
Avi Kivity7376e582012-02-08 21:05:17 +02002442 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002443}
2444
Jay Zhou19310762017-07-28 18:28:53 +08002445static void memory_vm_change_state_handler(void *opaque, int running,
2446 RunState state)
2447{
2448 if (running) {
2449 memory_global_dirty_log_do_stop();
2450
2451 if (vmstate_change) {
2452 qemu_del_vm_change_state_handler(vmstate_change);
2453 vmstate_change = NULL;
2454 }
2455 }
2456}
2457
2458void memory_global_dirty_log_stop(void)
2459{
2460 if (!runstate_is_running()) {
2461 if (vmstate_change) {
2462 return;
2463 }
2464 vmstate_change = qemu_add_vm_change_state_handler(
2465 memory_vm_change_state_handler, NULL);
2466 return;
2467 }
2468
2469 memory_global_dirty_log_do_stop();
2470}
2471
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .fv = view,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

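/*
 * Listeners are kept sorted by ascending priority on two lists: the
 * global memory_listeners list and the per-AddressSpace list.  A new
 * listener is inserted after existing listeners of equal priority, so
 * equal-priority listeners run in registration order on a Forward walk.
 */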
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

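/*
 * Illustrative sketch (not part of the original file): a minimal
 * MemoryListener.  Only the callbacks of interest need to be non-NULL;
 * region_add fires once per FlatRange at registration time and again
 * whenever the flat view changes.  The example_* names are invented.
 */
#if 0
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* e.g. mirror the new section into a device's view of guest memory */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

static void example_setup(void)
{
    memory_listener_register(&example_listener, &address_space_memory);
}
#endif
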
bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr callback itself calls
     * memory_region_invalidate_mmio_ptr(), which seems likely when a
     * cache is in use.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}

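/*
 * Invalidating a pointer handed out above is asynchronous: requests are
 * parked in the small fixed pool below (falling back to a heap
 * allocation when every slot is busy) and completed later from
 * async_safe_run_on_cpu().
 */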
typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find adds a ref on section.mr */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface; just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}

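/*
 * Note the use of async_safe_run_on_cpu() above: the pointer being
 * invalidated may still be referenced by code currently executing, so
 * the mmio_interface subregion is only torn down once the vCPUs have
 * reached a safe point outside the execution loop.
 */
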
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = flatview_new(root);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}

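/*
 * Illustrative sketch (not part of the original file): typical use of
 * address_space_init() by a device that wants a private view of memory
 * for its bus-master DMA.  ExampleDeviceState and the field names are
 * invented.
 */
#if 0
static void example_device_realize(ExampleDeviceState *s)
{
    memory_region_init(&s->dma_root, OBJECT(s), "example-dma-root",
                       UINT64_MAX);
    address_space_init(&s->dma_as, &s->dma_root, "example-dma");
    /* ... torn down later with address_space_destroy(&s->dma_as) ... */
}
#endif
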
static void do_address_space_destroy(AddressSpace *as)
{
    bool do_free = as->malloced;

    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}

AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

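/*
 * MR_SIZE() above converts a region size into the offset of the region's
 * last byte, so the listings below print inclusive start-end pairs; a
 * zero-sized region collapses to start-start.
 */
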
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of the memory region.  This should never
     * happen normally; if it does, print a marker to warn whoever is
     * reading the output.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

static void mtree_print_flatview(fprintf_function p, void *f,
                                 AddressSpace *as)
{
    FlatView *view = address_space_get_flatview(as);
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView for "
          "address space '%s'\n", as->name);
        flatview_unref(view);
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

    flatview_unref(view);
}

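/*
 * For reference, a flat-view line produced above has this shape
 * (addresses illustrative):
 *
 *   0000000000000000-000000000009ffff (prio 0, ram): pc.ram
 */
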
void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            mon_printf(f, "address-space (flat view): %s\n", as->name);
            mtree_print_flatview(mon_printf, f, as);
            mon_printf(f, "\n");
        }
        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

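/*
 * Illustrative sketch (not part of the original file): a device embedding
 * a migratable RAM block.  Because the owner passed in is a DeviceState,
 * vmstate_register_ram() can derive a unique migration name for it.  The
 * ExampleRamDeviceState type and EXAMPLE_RAM_DEVICE() cast are invented.
 */
#if 0
static void example_ram_device_realize(DeviceState *dev, Error **errp)
{
    ExampleRamDeviceState *s = EXAMPLE_RAM_DEVICE(dev);

    memory_region_init_ram(&s->ram, OBJECT(dev), "example.ram",
                           64 * 1024, errp);
}
#endif
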
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

static const TypeInfo memory_region_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_MEMORY_REGION,
    .instance_size = sizeof(MemoryRegion),
    .instance_init = memory_region_initfn,
    .instance_finalize = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent = TYPE_MEMORY_REGION,
    .name = TYPE_IOMMU_MEMORY_REGION,
    .class_size = sizeof(IOMMUMemoryRegionClass),
    .instance_size = sizeof(IOMMUMemoryRegion),
    .instance_init = iommu_memory_region_initfn,
    .abstract = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)