/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
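
/*
 * For illustration (not part of the upstream file): with
 * r1 = [0x1000, 0x1000 + 0x3000) and r2 = [0x2000, 0x2000 + 0x4000),
 * addrrange_intersects(r1, r2) is true and addrrange_intersection(r1, r2)
 * yields the range with start 0x2000 and size 0x2000, i.e. the overlap
 * [0x2000, 0x4000).
 */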

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)
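
/*
 * Illustrative expansion (a sketch, not part of the upstream file): a
 * listener registered on @as with only .region_add set is reached via
 *
 *     MEMORY_LISTENER_CALL(as, region_add, Forward, &section);
 *
 * which walks as->listeners in order and skips any listener whose
 * region_add callback is NULL; Reverse walks the same list backwards.
 */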

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

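/*
 * Total order on ioeventfds: by address, then size, then match_data, then
 * data, then notifier.  address_space_add_del_ioeventfds() relies on this
 * order to walk two sorted arrays in lockstep.
 */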
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

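/*
 * Take a reference unless the view is already being destroyed; returns
 * false once the refcount has dropped to zero, in which case the caller
 * must re-read as->current_map and retry (see address_space_get_flatview()).
 */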
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
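
/*
 * Example (illustration only): two FlatRanges mapping [0x0, 0x1000) and
 * [0x1000, 0x2000) of the same MemoryRegion, with contiguous
 * offset_in_region and identical dirty_log_mask, romd_mode, readonly and
 * nonvolatile attributes, satisfy can_merge() and collapse into a single
 * [0x0, 0x2000) range.
 */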

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
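
/*
 * Example (illustration only): on a little-endian target, a 4-byte value
 * 0x12345678 read from a DEVICE_BIG_ENDIAN region is byte-swapped by
 * adjust_endianness() to 0x78563412; 1-byte accesses are left untouched.
 */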

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
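
/*
 * Example (a sketch, assuming a device that only implements 4-byte
 * accesses): an 8-byte read at offset 0 is split into two 4-byte calls to
 * @access_fn, at offsets 0 and 4.  When memory_region_big_endian() is
 * false the two pieces are combined at shifts 0 and 32, roughly
 *
 *     *value |= (uint64_t)(lo & 0xffffffff) << 0;
 *     *value |= (uint64_t)(hi & 0xffffffff) << 32;
 *
 * and the shift order is reversed for big-endian combinations.
 */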

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

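/*
 * Find the region that can stand in for @mr as the root of its FlatView:
 * follow aliases that cover the whole region, and sole enabled children,
 * so that address spaces whose roots merely wrap the same region can
 * share a FlatView.  Returns NULL if nothing is enabled, in which case
 * the shared empty view is used.
 */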
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that intersects the specified FlatRange
 * is sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

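/*
 * Lazily allocate the global MemoryRegion -> FlatView table, and make
 * sure it contains the canonical empty view (keyed by NULL), which is
 * kept alive forever via an extra reference.
 */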
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
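
/*
 * Typical usage (illustrative; the setters shown are defined elsewhere in
 * this file): batch several topology changes so that flat views, dispatch
 * trees and listeners are rebuilt only once, at commit time:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);
 *     memory_region_add_subregion(container, 0x1000, mr_b);
 *     memory_region_transaction_commit();
 */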

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
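
/*
 * Example (illustration only): the characters '/', '[', ']' and '\' are
 * meaningful in QOM paths, so a name such as "pci/mem" is escaped to
 * "pci\x2fmem" before being used as a child property name.
 */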

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001284static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1285 unsigned size)
1286{
1287#ifdef DEBUG_UNASSIGNED
1288 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1289#endif
Andreas Färber4917cf42013-05-27 05:17:50 +02001290 if (current_cpu != NULL) {
Peter Maydelldbea78a2018-08-14 17:17:19 +01001291 bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
1292 cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
Andreas Färberc658b942013-05-27 06:49:53 +02001293 }
Jan Kiszka68a74392013-09-02 18:43:31 +02001294 return 0;
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001295}
1296
1297static void unassigned_mem_write(void *opaque, hwaddr addr,
1298 uint64_t val, unsigned size)
1299{
1300#ifdef DEBUG_UNASSIGNED
1301 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1302#endif
Andreas Färber4917cf42013-05-27 05:17:50 +02001303 if (current_cpu != NULL) {
1304 cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
Andreas Färberc658b942013-05-27 06:49:53 +02001305 }
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001306}
1307
Paolo Bonzinid1970632013-05-24 13:23:38 +02001308static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
Peter Maydell8372d382018-05-31 14:50:52 +01001309 unsigned size, bool is_write,
1310 MemTxAttrs attrs)
Paolo Bonzinid1970632013-05-24 13:23:38 +02001311{
1312 return false;
1313}
1314
1315const MemoryRegionOps unassigned_mem_ops = {
1316 .valid.accepts = unassigned_mem_accepts,
1317 .endianness = DEVICE_NATIVE_ENDIAN,
1318};
1319
Alex Williamson4a2e2422016-10-31 09:53:03 -06001320static uint64_t memory_region_ram_device_read(void *opaque,
1321 hwaddr addr, unsigned size)
1322{
1323 MemoryRegion *mr = opaque;
1324 uint64_t data = (uint64_t)~0;
1325
1326 switch (size) {
1327 case 1:
1328 data = *(uint8_t *)(mr->ram_block->host + addr);
1329 break;
1330 case 2:
1331 data = *(uint16_t *)(mr->ram_block->host + addr);
1332 break;
1333 case 4:
1334 data = *(uint32_t *)(mr->ram_block->host + addr);
1335 break;
1336 case 8:
1337 data = *(uint64_t *)(mr->ram_block->host + addr);
1338 break;
1339 }
1340
1341 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1342
1343 return data;
1344}
1345
1346static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1347 uint64_t data, unsigned size)
1348{
1349 MemoryRegion *mr = opaque;
1350
1351 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1352
1353 switch (size) {
1354 case 1:
1355 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1356 break;
1357 case 2:
1358 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1359 break;
1360 case 4:
1361 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1362 break;
1363 case 8:
1364 *(uint64_t *)(mr->ram_block->host + addr) = data;
1365 break;
1366 }
1367}
1368
1369static const MemoryRegionOps ram_device_mem_ops = {
1370 .read = memory_region_ram_device_read,
1371 .write = memory_region_ram_device_write,
Yongji Xiec99a29e2017-02-27 12:52:44 +08001372 .endianness = DEVICE_HOST_ENDIAN,
Alex Williamson4a2e2422016-10-31 09:53:03 -06001373 .valid = {
1374 .min_access_size = 1,
1375 .max_access_size = 8,
1376 .unaligned = true,
1377 },
1378 .impl = {
1379 .min_access_size = 1,
1380 .max_access_size = 8,
1381 .unaligned = true,
1382 },
1383};
1384
Paolo Bonzinid2702032013-05-24 11:55:06 +02001385bool memory_region_access_valid(MemoryRegion *mr,
1386 hwaddr addr,
1387 unsigned size,
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001388 bool is_write,
1389 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001390{
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001391 int access_size_min, access_size_max;
1392 int access_size, i;
Avi Kivity897fa7c2011-11-13 13:05:27 +02001393
Avi Kivity093bc2c2011-07-26 14:26:01 +03001394 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1395 return false;
1396 }
1397
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001398 if (!mr->ops->valid.accepts) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001399 return true;
1400 }
1401
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001402 access_size_min = mr->ops->valid.min_access_size;
1403 if (!mr->ops->valid.min_access_size) {
1404 access_size_min = 1;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001405 }
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001406
1407 access_size_max = mr->ops->valid.max_access_size;
1408 if (!mr->ops->valid.max_access_size) {
1409 access_size_max = 4;
1410 }
1411
1412 access_size = MAX(MIN(size, access_size_max), access_size_min);
1413 for (i = 0; i < size; i += access_size) {
1414 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
Peter Maydell8372d382018-05-31 14:50:52 +01001415 is_write, attrs)) {
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001416 return false;
1417 }
1418 }
1419
Avi Kivity093bc2c2011-07-26 14:26:01 +03001420 return true;
1421}
1422
Peter Maydellcc05c432015-04-26 16:49:23 +01001423static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1424 hwaddr addr,
1425 uint64_t *pval,
1426 unsigned size,
1427 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001428{
Peter Maydellcc05c432015-04-26 16:49:23 +01001429 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001430
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001431 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001432 return access_with_adjusted_size(addr, pval, size,
1433 mr->ops->impl.min_access_size,
1434 mr->ops->impl.max_access_size,
1435 memory_region_read_accessor,
1436 mr, attrs);
Peter Maydell62a0db92018-08-24 18:04:20 +01001437 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001438 return access_with_adjusted_size(addr, pval, size,
1439 mr->ops->impl.min_access_size,
1440 mr->ops->impl.max_access_size,
1441 memory_region_read_with_attrs_accessor,
1442 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001443 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001444}
1445
Peter Maydell3b643492015-04-26 16:49:23 +01001446MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1447 hwaddr addr,
1448 uint64_t *pval,
1449 unsigned size,
1450 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001451{
Peter Maydellcc05c432015-04-26 16:49:23 +01001452 MemTxResult r;
1453
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001454 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001455 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001456 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001457 }
Avi Kivitya621f382012-01-02 13:12:08 +02001458
Peter Maydellcc05c432015-04-26 16:49:23 +01001459 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001460 adjust_endianness(mr, pval, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001461 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001462}
1463
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001464/* Return true if an eventfd was signalled */
1465static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1466 hwaddr addr,
1467 uint64_t data,
1468 unsigned size,
1469 MemTxAttrs attrs)
1470{
1471 MemoryRegionIoeventfd ioeventfd = {
1472 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1473 .data = data,
1474 };
1475 unsigned i;
1476
1477 for (i = 0; i < mr->ioeventfd_nb; i++) {
1478 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1479 ioeventfd.e = mr->ioeventfds[i].e;
1480
Tristan Burgess73bb7532018-05-28 23:04:45 -04001481 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001482 event_notifier_set(ioeventfd.e);
1483 return true;
1484 }
1485 }
1486
1487 return false;
1488}
1489
Peter Maydell3b643492015-04-26 16:49:23 +01001490MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1491 hwaddr addr,
1492 uint64_t data,
1493 unsigned size,
1494 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001495{
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001496 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001497 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001498 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001499 }
1500
Avi Kivitya621f382012-01-02 13:12:08 +02001501 adjust_endianness(mr, &data, size);
1502
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001503 if ((!kvm_eventfds_enabled()) &&
1504 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1505 return MEMTX_OK;
1506 }
1507
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001508 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001509 return access_with_adjusted_size(addr, &data, size,
1510 mr->ops->impl.min_access_size,
1511 mr->ops->impl.max_access_size,
1512 memory_region_write_accessor, mr,
1513 attrs);
Peter Maydell62a0db92018-08-24 18:04:20 +01001514 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001515 return
1516 access_with_adjusted_size(addr, &data, size,
1517 mr->ops->impl.min_access_size,
1518 mr->ops->impl.max_access_size,
1519 memory_region_write_with_attrs_accessor,
1520 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001521 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001522}
1523
Avi Kivity093bc2c2011-07-26 14:26:01 +03001524void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001525 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001526 const MemoryRegionOps *ops,
1527 void *opaque,
1528 const char *name,
1529 uint64_t size)
1530{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001531 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001532 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001533 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001534 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001535}
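
/*
 * Illustrative usage sketch (the FooState device, its callbacks and its
 * register layout are hypothetical, not part of this file): a typical
 * MMIO register bank wired up with memory_region_init_io().  The .valid
 * constraints below are what memory_region_access_valid() enforces
 * before an access is dispatched to the callbacks.
 */
typedef struct FooState {
    MemoryRegion mmio;
    uint32_t ctrl;                        /* single 32-bit register at 0x0 */
} FooState;

static uint64_t foo_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    FooState *s = opaque;

    return addr == 0 ? s->ctrl : 0;       /* unknown offsets read as zero */
}

static void foo_mmio_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    FooState *s = opaque;

    if (addr == 0) {
        s->ctrl = val;
    }
}

static const MemoryRegionOps foo_mmio_ops = {
    .read = foo_mmio_read,
    .write = foo_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,             /* reject sub-word guest accesses */
        .max_access_size = 4,
    },
};

static void foo_init_mmio(FooState *s, Object *owner)
{
    memory_region_init_io(&s->mmio, owner, &foo_mmio_ops, s,
                          "foo-mmio", 0x1000);
}
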
1536
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001537void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1538 Object *owner,
1539 const char *name,
1540 uint64_t size,
1541 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001542{
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001543 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1544}
1545
1546void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1547 Object *owner,
1548 const char *name,
1549 uint64_t size,
1550 bool share,
1551 Error **errp)
1552{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001553 Error *err = NULL;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001554 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001555 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001556 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001557 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001558 mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001559 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001560 if (err) {
1561 mr->size = int128_zero();
1562 object_unparent(OBJECT(mr));
1563 error_propagate(errp, err);
1564 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001565}
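
/*
 * Illustrative usage sketch (the "ram" name and 128 MiB size are made up):
 * board code typically allocates guest RAM with &error_fatal so that an
 * allocation failure aborts startup.  The _nomigrate variants leave
 * migration registration (vmstate_register_ram and friends) to the caller.
 */
static void example_alloc_board_ram(MemoryRegion *ram, Object *owner)
{
    memory_region_init_ram_nomigrate(ram, owner, "ram",
                                     128 * 1024 * 1024, &error_fatal);
}
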
1566
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001567void memory_region_init_resizeable_ram(MemoryRegion *mr,
1568 Object *owner,
1569 const char *name,
1570 uint64_t size,
1571 uint64_t max_size,
1572 void (*resized)(const char*,
1573 uint64_t length,
1574 void *host),
1575 Error **errp)
1576{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001577 Error *err = NULL;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001578 memory_region_init(mr, owner, name, size);
1579 mr->ram = true;
1580 mr->terminates = true;
1581 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001582 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001583 mr, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001584 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001585 if (err) {
1586 mr->size = int128_zero();
1587 object_unparent(OBJECT(mr));
1588 error_propagate(errp, err);
1589 }
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001590}
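
/*
 * Illustrative usage sketch (names, sizes and the resized() hook body are
 * hypothetical): a resizeable RAM block starts at an initial size, may grow
 * up to max_size, and the callback runs whenever memory_region_ram_resize()
 * changes the used length.
 */
static void example_table_resized(const char *id, uint64_t length, void *host)
{
    /* e.g. update a header in 'host' that records the used length */
}

static void example_init_and_grow_table(MemoryRegion *mr, Object *owner,
                                        Error **errp)
{
    memory_region_init_resizeable_ram(mr, owner, "example-table",
                                      4 * 1024,       /* initial size */
                                      64 * 1024,      /* maximum size */
                                      example_table_resized, errp);
    memory_region_ram_resize(mr, 16 * 1024, errp);    /* grow it later */
}
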
1591
Hikaru Nishidad5dbde42018-09-24 21:32:05 +09001592#ifdef CONFIG_POSIX
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001593void memory_region_init_ram_from_file(MemoryRegion *mr,
1594 struct Object *owner,
1595 const char *name,
1596 uint64_t size,
Haozhong Zhang98376842017-12-11 15:28:04 +08001597 uint64_t align,
Junyan Hecbfc0172018-07-18 15:47:58 +08001598 uint32_t ram_flags,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001599 const char *path,
1600 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001601{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001602 Error *err = NULL;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001603 memory_region_init(mr, owner, name, size);
1604 mr->ram = true;
1605 mr->terminates = true;
1606 mr->destructor = memory_region_destructor_ram;
Haozhong Zhang98376842017-12-11 15:28:04 +08001607 mr->align = align;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001608 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001609 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001610 if (err) {
1611 mr->size = int128_zero();
1612 object_unparent(OBJECT(mr));
1613 error_propagate(errp, err);
1614 }
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001615}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001616
1617void memory_region_init_ram_from_fd(MemoryRegion *mr,
1618 struct Object *owner,
1619 const char *name,
1620 uint64_t size,
1621 bool share,
1622 int fd,
1623 Error **errp)
1624{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001625 Error *err = NULL;
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001626 memory_region_init(mr, owner, name, size);
1627 mr->ram = true;
1628 mr->terminates = true;
1629 mr->destructor = memory_region_destructor_ram;
Junyan Hecbfc0172018-07-18 15:47:58 +08001630 mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
1631 share ? RAM_SHARED : 0,
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001632 fd, &err);
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001633 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001634 if (err) {
1635 mr->size = int128_zero();
1636 object_unparent(OBJECT(mr));
1637 error_propagate(errp, err);
1638 }
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001639}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001640#endif
1641
Avi Kivity093bc2c2011-07-26 14:26:01 +03001642void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001643 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001644 const char *name,
1645 uint64_t size,
1646 void *ptr)
1647{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001648 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001649 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001650 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001651 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001652 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001653
1654 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1655 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001656 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001657}
1658
Alex Williamson21e00fa2016-10-31 09:53:03 -06001659void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1660 Object *owner,
1661 const char *name,
1662 uint64_t size,
1663 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301664{
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001665 memory_region_init(mr, owner, name, size);
1666 mr->ram = true;
1667 mr->terminates = true;
Alex Williamson21e00fa2016-10-31 09:53:03 -06001668 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001669 mr->ops = &ram_device_mem_ops;
1670 mr->opaque = mr;
Singh, Brijesh2ddb89b2019-02-04 22:23:39 +00001671 mr->destructor = memory_region_destructor_ram;
1672 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1673 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1674 assert(ptr != NULL);
1675 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301676}
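
/*
 * Illustrative usage sketch (the BAR name and the origin of mmap_ptr are
 * hypothetical): a host-mmap()ed device BAR exposed as a "RAM device"
 * region, so that accesses made through the memory API use the explicit,
 * size-bounded loads and stores of ram_device_mem_ops above rather than
 * being widened or turned into memcpy on device memory.
 */
static void example_map_device_bar(MemoryRegion *mr, Object *owner,
                                   void *mmap_ptr, uint64_t bar_size)
{
    memory_region_init_ram_device_ptr(mr, owner, "example-bar0",
                                      bar_size, mmap_ptr);
}
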
1677
Avi Kivity093bc2c2011-07-26 14:26:01 +03001678void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001679 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001680 const char *name,
1681 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001682 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001683 uint64_t size)
1684{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001685 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001686 mr->alias = orig;
1687 mr->alias_offset = offset;
1688}
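
/*
 * Illustrative usage sketch (names and addresses are hypothetical): an
 * alias makes part of an existing region visible a second time, here the
 * first 1 MiB of a RAM region re-mapped near the top of the 32-bit space
 * with a higher priority.  memory_region_add_subregion_overlap() is
 * defined further down.
 */
static void example_alias_low_ram(MemoryRegion *sysmem, MemoryRegion *ram,
                                  MemoryRegion *alias)
{
    memory_region_init_alias(alias, NULL, "ram-low-alias", ram,
                             0, 1024 * 1024);
    memory_region_add_subregion_overlap(sysmem, 0xfff00000, alias, 1);
}
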
1689
Peter Maydellb59821a2017-07-07 15:42:50 +01001690void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1691 struct Object *owner,
1692 const char *name,
1693 uint64_t size,
1694 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001695{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001696 Error *err = NULL;
Peter Maydella1777f72016-07-04 13:06:35 +01001697 memory_region_init(mr, owner, name, size);
1698 mr->ram = true;
1699 mr->readonly = true;
1700 mr->terminates = true;
1701 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001702 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
Peter Maydella1777f72016-07-04 13:06:35 +01001703 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001704 if (err) {
1705 mr->size = int128_zero();
1706 object_unparent(OBJECT(mr));
1707 error_propagate(errp, err);
1708 }
Peter Maydella1777f72016-07-04 13:06:35 +01001709}
1710
Peter Maydellb59821a2017-07-07 15:42:50 +01001711void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1712 Object *owner,
1713 const MemoryRegionOps *ops,
1714 void *opaque,
1715 const char *name,
1716 uint64_t size,
1717 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001718{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001719 Error *err = NULL;
Peter Maydell39e0b032016-07-04 13:06:35 +01001720 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001721 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001722 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001723 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001724 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001725 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001726 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001727 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1728 if (err) {
1729 mr->size = int128_zero();
1730 object_unparent(OBJECT(mr));
1731 error_propagate(errp, err);
1732 }
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001733}
1734
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001735void memory_region_init_iommu(void *_iommu_mr,
1736 size_t instance_size,
1737 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001738 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001739 const char *name,
1740 uint64_t size)
1741{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001742 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001743 struct MemoryRegion *mr;
1744
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001745 object_initialize(_iommu_mr, instance_size, mrtypename);
1746 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001747 memory_region_do_init(mr, owner, name, size);
1748 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001749 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001750 QLIST_INIT(&iommu_mr->iommu_notify);
1751 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001752}
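
/*
 * Illustrative sketch of the caller side (ExampleIOMMUState and the type
 * name are hypothetical): an IOMMU region is an instance of a QOM type
 * derived from TYPE_IOMMU_MEMORY_REGION whose class supplies at least a
 * translate() hook; memory_region_init_iommu() initializes the embedded
 * object and its region in one go.
 */
typedef struct ExampleIOMMUState {
    IOMMUMemoryRegion iommu_mr;
    /* ... page table roots, caches ... */
} ExampleIOMMUState;

static void example_iommu_init_mr(ExampleIOMMUState *s, Object *owner)
{
    memory_region_init_iommu(&s->iommu_mr, sizeof(s->iommu_mr),
                             "example-iommu-memory-region", owner,
                             "example-iommu", UINT64_MAX);
}
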
1753
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001754static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001755{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001756 MemoryRegion *mr = MEMORY_REGION(obj);
1757
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001758 assert(!mr->container);
1759
1760 /* We know the region is not visible in any address space (it
1761 * does not have a container and cannot be a root either because
 1762      * it has no references), so we can blindly clear mr->enabled.
1763 * memory_region_set_enabled instead could trigger a transaction
1764 * and cause an infinite loop.
1765 */
1766 mr->enabled = false;
1767 memory_region_transaction_begin();
1768 while (!QTAILQ_EMPTY(&mr->subregions)) {
1769 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1770 memory_region_del_subregion(mr, subregion);
1771 }
1772 memory_region_transaction_commit();
1773
Avi Kivity545e92e2011-08-08 19:58:48 +03001774 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001775 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001776 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001777 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001778}
1779
Paolo Bonzini803c0812013-05-07 06:59:09 +02001780Object *memory_region_owner(MemoryRegion *mr)
1781{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001782 Object *obj = OBJECT(mr);
1783 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001784}
1785
Paolo Bonzini46637be2013-05-07 09:06:00 +02001786void memory_region_ref(MemoryRegion *mr)
1787{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001788 /* MMIO callbacks most likely will access data that belongs
1789 * to the owner, hence the need to ref/unref the owner whenever
1790 * the memory region is in use.
1791 *
1792 * The memory region is a child of its owner. As long as the
1793 * owner doesn't call unparent itself on the memory region,
1794 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001795 * Memory regions without an owner are supposed to never go away;
 1796      * we do not ref/unref them because it slows down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001797 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001798 if (mr && mr->owner) {
1799 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001800 }
1801}
1802
1803void memory_region_unref(MemoryRegion *mr)
1804{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001805 if (mr && mr->owner) {
1806 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001807 }
1808}
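
/*
 * Illustrative usage sketch (the ExampleDMAMapping struct is hypothetical):
 * any code that stashes a MemoryRegion pointer for later use should hold a
 * reference so that, per the comment above, the owning device cannot be
 * finalized while the region is still in use.
 */
typedef struct ExampleDMAMapping {
    MemoryRegion *mr;
    hwaddr offset;
} ExampleDMAMapping;

static void example_dma_mapping_set(ExampleDMAMapping *map,
                                    MemoryRegion *mr, hwaddr offset)
{
    memory_region_ref(mr);            /* pins mr->owner as described above */
    map->mr = mr;
    map->offset = offset;
}

static void example_dma_mapping_clear(ExampleDMAMapping *map)
{
    memory_region_unref(map->mr);
    map->mr = NULL;
}
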
1809
Avi Kivity093bc2c2011-07-26 14:26:01 +03001810uint64_t memory_region_size(MemoryRegion *mr)
1811{
Avi Kivity08dafab2011-10-16 13:19:17 +02001812 if (int128_eq(mr->size, int128_2_64())) {
1813 return UINT64_MAX;
1814 }
1815 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001816}
1817
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001818const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001819{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001820 if (!mr->name) {
1821 ((MemoryRegion *)mr)->name =
1822 object_get_canonical_path_component(OBJECT(mr));
1823 }
Peter Maydell302fa282014-08-19 20:05:46 +01001824 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001825}
1826
Alex Williamson21e00fa2016-10-31 09:53:03 -06001827bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301828{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001829 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301830}
1831
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001832uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001833{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001834 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001835 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001836 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1837 }
1838 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001839}
1840
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001841bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1842{
1843 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1844}
1845
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001846static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001847{
1848 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1849 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001850 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001851
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001852 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001853 flags |= iommu_notifier->notifier_flags;
1854 }
1855
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001856 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1857 imrc->notify_flag_changed(iommu_mr,
1858 iommu_mr->iommu_notify_flags,
1859 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001860 }
1861
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001862 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001863}
1864
Peter Xucdb30812016-09-23 13:02:26 +08001865void memory_region_register_iommu_notifier(MemoryRegion *mr,
1866 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001867{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001868 IOMMUMemoryRegion *iommu_mr;
1869
Jason Wangefcd38c2016-12-30 18:09:17 +08001870 if (mr->alias) {
1871 memory_region_register_iommu_notifier(mr->alias, n);
1872 return;
1873 }
1874
Peter Xucdb30812016-09-23 13:02:26 +08001875 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001876 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001877 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001878 assert(n->start <= n->end);
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001879 assert(n->iommu_idx >= 0 &&
1880 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1881
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001882 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1883 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001884}
1885
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001886uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001887{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001888 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1889
1890 if (imrc->get_min_page_size) {
1891 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001892 }
1893 return TARGET_PAGE_SIZE;
1894}
1895
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001896void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001897{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001898 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001899 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001900 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001901 IOMMUTLBEntry iotlb;
1902
Peter Xufaa362e2017-04-07 18:59:11 +08001903 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001904 if (imrc->replay) {
1905 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001906 return;
1907 }
1908
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001909 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001910
David Gibsona788f222015-09-30 12:13:55 +10001911 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Peter Maydell2c91bcf2018-06-15 14:57:16 +01001912 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
David Gibsona788f222015-09-30 12:13:55 +10001913 if (iotlb.perm != IOMMU_NONE) {
1914 n->notify(n, &iotlb);
1915 }
1916
 1917         /* If (2^64 - MR size) < granularity, it's possible to get an
 1918          * infinite loop here.  This should catch such a wraparound. */
1919 if ((addr + granularity) < addr) {
1920 break;
1921 }
1922 }
1923}
1924
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001925void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001926{
1927 IOMMUNotifier *notifier;
1928
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001929 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1930 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001931 }
1932}
1933
Peter Xucdb30812016-09-23 13:02:26 +08001934void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1935 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001936{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001937 IOMMUMemoryRegion *iommu_mr;
1938
Jason Wangefcd38c2016-12-30 18:09:17 +08001939 if (mr->alias) {
1940 memory_region_unregister_iommu_notifier(mr->alias, n);
1941 return;
1942 }
Peter Xucdb30812016-09-23 13:02:26 +08001943 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001944 iommu_mr = IOMMU_MEMORY_REGION(mr);
1945 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001946}
1947
Peter Xubd2bfa42017-04-07 18:59:10 +08001948void memory_region_notify_one(IOMMUNotifier *notifier,
1949 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001950{
Peter Xucdb30812016-09-23 13:02:26 +08001951 IOMMUNotifierFlag request_flags;
Yan Zhao03c71402019-06-25 11:21:18 +08001952 hwaddr entry_end = entry->iova + entry->addr_mask;
Peter Xucdb30812016-09-23 13:02:26 +08001953
Peter Xubd2bfa42017-04-07 18:59:10 +08001954 /*
 1955      * Skip the notification if it does not overlap with the
 1956      * registered range.
1957 */
Yan Zhao03c71402019-06-25 11:21:18 +08001958 if (notifier->start > entry_end || notifier->end < entry->iova) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001959 return;
1960 }
Peter Xucdb30812016-09-23 13:02:26 +08001961
Yan Zhao03c71402019-06-25 11:21:18 +08001962 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
1963
Peter Xubd2bfa42017-04-07 18:59:10 +08001964 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001965 request_flags = IOMMU_NOTIFIER_MAP;
1966 } else {
1967 request_flags = IOMMU_NOTIFIER_UNMAP;
1968 }
1969
Peter Xubd2bfa42017-04-07 18:59:10 +08001970 if (notifier->notifier_flags & request_flags) {
1971 notifier->notify(notifier, entry);
1972 }
1973}
1974
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001975void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001976 int iommu_idx,
Peter Xubd2bfa42017-04-07 18:59:10 +08001977 IOMMUTLBEntry entry)
1978{
1979 IOMMUNotifier *iommu_notifier;
1980
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001981 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001982
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001983 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001984 if (iommu_notifier->iommu_idx == iommu_idx) {
1985 memory_region_notify_one(iommu_notifier, &entry);
1986 }
Peter Xucdb30812016-09-23 13:02:26 +08001987 }
David Gibson06866572013-05-14 19:13:56 +10001988}
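
/*
 * Illustrative sketch (iova/page_mask values and the call site are
 * hypothetical): an IOMMU model announcing that the guest invalidated a
 * translation.  A perm of IOMMU_NONE marks the entry as an unmap for
 * notifiers registered with IOMMU_NOTIFIER_UNMAP.
 */
static void example_iommu_invalidate(IOMMUMemoryRegion *iommu_mr,
                                     hwaddr iova, hwaddr page_mask)
{
    IOMMUTLBEntry entry = {
        .iova = iova & ~page_mask,
        .translated_addr = 0,
        .addr_mask = page_mask,
        .perm = IOMMU_NONE,                /* i.e. an unmap notification */
    };

    memory_region_notify_iommu(iommu_mr, 0 /* iommu_idx */, entry);
}
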
1989
Alexey Kardashevskiyf1334de2018-02-06 11:08:24 -07001990int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1991 enum IOMMUMemoryRegionAttr attr,
1992 void *data)
1993{
1994 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1995
1996 if (!imrc->get_attr) {
1997 return -EINVAL;
1998 }
1999
2000 return imrc->get_attr(iommu_mr, attr, data);
2001}
2002
Peter Maydell21f40202018-06-15 14:57:15 +01002003int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
2004 MemTxAttrs attrs)
2005{
2006 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2007
2008 if (!imrc->attrs_to_index) {
2009 return 0;
2010 }
2011
2012 return imrc->attrs_to_index(iommu_mr, attrs);
2013}
2014
2015int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2016{
2017 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2018
2019 if (!imrc->num_indexes) {
2020 return 1;
2021 }
2022
2023 return imrc->num_indexes(iommu_mr);
2024}
2025
Avi Kivity093bc2c2011-07-26 14:26:01 +03002026void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2027{
Avi Kivity5a583342011-07-26 14:26:02 +03002028 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02002029 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03002030
Paolo Bonzinidbddac62015-03-23 10:31:53 +01002031 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02002032 old_logging = mr->vga_logging_count;
2033 mr->vga_logging_count += log ? 1 : -1;
2034 if (!!old_logging == !!mr->vga_logging_count) {
2035 return;
2036 }
2037
Jan Kiszka59023ef2012-08-23 13:02:30 +02002038 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03002039 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01002040 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002041 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002042}
2043
Avi Kivitya8170e52012-10-23 12:30:10 +02002044void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2045 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002046{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002047 assert(mr->ram_block);
2048 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2049 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01002050 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002051}
2052
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002053static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002054{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002055 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02002056 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002057 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03002058 FlatRange *fr;
2059
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002060 /* If the same address space has multiple log_sync listeners, we
2061 * visit that address space's FlatView multiple times. But because
2062 * log_sync listeners are rare, it's still cheaper than walking each
2063 * address space once.
2064 */
2065 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2066 if (!listener->log_sync) {
2067 continue;
2068 }
2069 as = listener->address_space;
2070 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002071 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002072 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002073 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002074 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02002075 }
Avi Kivity5a583342011-07-26 14:26:02 +03002076 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002077 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03002078 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002079}
2080
Peter Xu077874e2019-06-03 14:50:51 +08002081void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2082 hwaddr len)
2083{
2084 MemoryRegionSection mrs;
2085 MemoryListener *listener;
2086 AddressSpace *as;
2087 FlatView *view;
2088 FlatRange *fr;
2089 hwaddr sec_start, sec_end, sec_size;
2090
2091 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2092 if (!listener->log_clear) {
2093 continue;
2094 }
2095 as = listener->address_space;
2096 view = address_space_get_flatview(as);
2097 FOR_EACH_FLAT_RANGE(fr, view) {
2098 if (!fr->dirty_log_mask || fr->mr != mr) {
2099 /*
 2100                  * The clear-dirty-bitmap operation only applies to regions
 2101                  * whose dirty logging is enabled for at least one client
2102 */
2103 continue;
2104 }
2105
2106 mrs = section_from_flat_range(fr, view);
2107
2108 sec_start = MAX(mrs.offset_within_region, start);
2109 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2110 sec_end = MIN(sec_end, start + len);
2111
2112 if (sec_start >= sec_end) {
2113 /*
2114 * If this memory region section has no intersection
2115 * with the requested range, skip.
2116 */
2117 continue;
2118 }
2119
2120 /* Valid case; shrink the section if needed */
2121 mrs.offset_within_address_space +=
2122 sec_start - mrs.offset_within_region;
2123 mrs.offset_within_region = sec_start;
2124 sec_size = sec_end - sec_start;
2125 mrs.size = int128_make64(sec_size);
2126 listener->log_clear(listener, &mrs);
2127 }
2128 flatview_unref(view);
2129 }
2130}
2131
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002132DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2133 hwaddr addr,
2134 hwaddr size,
2135 unsigned client)
2136{
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002137 DirtyBitmapSnapshot *snapshot;
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002138 assert(mr->ram_block);
2139 memory_region_sync_dirty_bitmap(mr);
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01002140 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2141 memory_global_after_dirty_log_sync();
2142 return snapshot;
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002143}
2144
2145bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2146 hwaddr addr, hwaddr size)
2147{
2148 assert(mr->ram_block);
2149 return cpu_physical_memory_snapshot_get_dirty(snap,
2150 memory_region_get_ram_addr(mr) + addr, size);
2151}
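
/*
 * Illustrative usage sketch (framebuffer region, page size and redraw logic
 * are hypothetical): VGA-style dirty tracking.  Logging must have been
 * enabled first with memory_region_set_log(fb, true, DIRTY_MEMORY_VGA),
 * defined above; the snapshot is assumed to be freed with g_free().
 */
static void example_update_display(MemoryRegion *fb, hwaddr fb_size)
{
    DirtyBitmapSnapshot *snap;
    hwaddr page;

    snap = memory_region_snapshot_and_clear_dirty(fb, 0, fb_size,
                                                  DIRTY_MEMORY_VGA);
    for (page = 0; page < fb_size; page += 4096) {
        if (memory_region_snapshot_get_dirty(fb, snap, page, 4096)) {
            /* redraw the scanlines backed by this page */
        }
    }
    g_free(snap);
}
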
2152
Avi Kivity093bc2c2011-07-26 14:26:01 +03002153void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2154{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002155 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002156 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002157 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01002158 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002159 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002160 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002161}
2162
Marc-André Lureauc26763f2018-10-03 15:44:52 +04002163void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2164{
2165 if (mr->nonvolatile != nonvolatile) {
2166 memory_region_transaction_begin();
2167 mr->nonvolatile = nonvolatile;
2168 memory_region_update_pending |= mr->enabled;
2169 memory_region_transaction_commit();
2170 }
2171}
2172
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002173void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002174{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002175 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002176 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002177 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01002178 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002179 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002180 }
2181}
2182
Avi Kivitya8170e52012-10-23 12:30:10 +02002183void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2184 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002185{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002186 assert(mr->ram_block);
2187 cpu_physical_memory_test_and_clear_dirty(
2188 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002189}
2190
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002191int memory_region_get_fd(MemoryRegion *mr)
2192{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002193 int fd;
2194
2195 rcu_read_lock();
2196 while (mr->alias) {
2197 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002198 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002199 fd = mr->ram_block->fd;
2200 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002201
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002202 return fd;
2203}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002204
Avi Kivity093bc2c2011-07-26 14:26:01 +03002205void *memory_region_get_ram_ptr(MemoryRegion *mr)
2206{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002207 void *ptr;
2208 uint64_t offset = 0;
2209
2210 rcu_read_lock();
2211 while (mr->alias) {
2212 offset += mr->alias_offset;
2213 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002214 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002215 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002216 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002217 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002218
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002219 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002220}
2221
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002222MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2223{
2224 RAMBlock *block;
2225
2226 block = qemu_ram_block_from_host(ptr, false, offset);
2227 if (!block) {
2228 return NULL;
2229 }
2230
2231 return block->mr;
2232}
2233
Fam Zheng7ebb2742016-03-01 14:18:20 +08002234ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2235{
2236 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2237}
2238
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002239void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2240{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002241 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002242
Gongleifa53a0e2016-05-10 10:04:59 +08002243 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002244}
2245
Peter Xub960fc12019-08-20 22:13:28 +08002246/*
 2247  * Notify the proper memory listeners about the change to the newly
 2248  * added/removed CoalescedMemoryRange.
2249 */
2250static void memory_region_update_coalesced_range(MemoryRegion *mr,
2251 CoalescedMemoryRange *cmr,
2252 bool add)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002253{
Peter Xub960fc12019-08-20 22:13:28 +08002254 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002255 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002256 FlatRange *fr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002257
Avi Kivity0d673e32012-10-02 15:28:50 +02002258 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Peter Xub960fc12019-08-20 22:13:28 +08002259 view = address_space_get_flatview(as);
2260 FOR_EACH_FLAT_RANGE(fr, view) {
2261 if (fr->mr == mr) {
2262 flat_range_coalesced_io_notify(fr, as, cmr, add);
2263 }
2264 }
2265 flatview_unref(view);
Avi Kivity0d673e32012-10-02 15:28:50 +02002266 }
2267}
2268
Avi Kivity093bc2c2011-07-26 14:26:01 +03002269void memory_region_set_coalescing(MemoryRegion *mr)
2270{
2271 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002272 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002273}
2274
2275void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002276 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002277 uint64_t size)
2278{
Anthony Liguori7267c092011-08-20 22:09:37 -05002279 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002280
Avi Kivity08dafab2011-10-16 13:19:17 +02002281 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002282 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
Peter Xub960fc12019-08-20 22:13:28 +08002283 memory_region_update_coalesced_range(mr, cmr, true);
Jan Kiszkad4105152012-08-23 13:02:29 +02002284 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002285}
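
/*
 * Illustrative usage sketch (the 0x100-byte range is arbitrary): a device
 * whose MMIO writes have no immediate side effects can let the accelerator
 * batch them; the batch is flushed with qemu_flush_coalesced_mmio_buffer()
 * before accesses that must observe the writes.
 */
static void example_enable_coalescing(MemoryRegion *mmio)
{
    /* memory_region_set_coalescing() would cover the whole region instead */
    memory_region_add_coalescing(mmio, 0, 0x100);
}
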
2286
2287void memory_region_clear_coalescing(MemoryRegion *mr)
2288{
2289 CoalescedMemoryRange *cmr;
Peter Xu9c1aa1c2019-08-20 22:13:27 +08002290
2291 if (QTAILQ_EMPTY(&mr->coalesced)) {
2292 return;
2293 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002294
Jan Kiszkad4105152012-08-23 13:02:29 +02002295 qemu_flush_coalesced_mmio_buffer();
2296 mr->flush_coalesced_mmio = false;
2297
Avi Kivity093bc2c2011-07-26 14:26:01 +03002298 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2299 cmr = QTAILQ_FIRST(&mr->coalesced);
2300 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Peter Xub960fc12019-08-20 22:13:28 +08002301 memory_region_update_coalesced_range(mr, cmr, false);
Anthony Liguori7267c092011-08-20 22:09:37 -05002302 g_free(cmr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002303 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002304}
2305
Jan Kiszkad4105152012-08-23 13:02:29 +02002306void memory_region_set_flush_coalesced(MemoryRegion *mr)
2307{
2308 mr->flush_coalesced_mmio = true;
2309}
2310
2311void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2312{
2313 qemu_flush_coalesced_mmio_buffer();
2314 if (QTAILQ_EMPTY(&mr->coalesced)) {
2315 mr->flush_coalesced_mmio = false;
2316 }
2317}
2318
Jan Kiszka196ea132015-06-18 18:47:20 +02002319void memory_region_clear_global_locking(MemoryRegion *mr)
2320{
2321 mr->global_locking = false;
2322}
2323
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002324static bool userspace_eventfd_warning;
2325
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002326void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002327 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002328 unsigned size,
2329 bool match_data,
2330 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002331 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002332{
2333 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002334 .addr.start = int128_make64(addr),
2335 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002336 .match_data = match_data,
2337 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002338 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002339 };
2340 unsigned i;
2341
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002342 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2343 userspace_eventfd_warning))) {
2344 userspace_eventfd_warning = true;
2345 error_report("Using eventfd without MMIO binding in KVM. "
2346 "Suboptimal performance expected");
2347 }
2348
Jason Wangb8aecea2015-11-06 16:02:45 +08002349 if (size) {
2350 adjust_endianness(mr, &mrfd.data, size);
2351 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002352 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002353 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002354 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002355 break;
2356 }
2357 }
2358 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002359 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002360 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2361 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2362 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2363 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002364 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002365 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002366}
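
/*
 * Illustrative usage sketch (the notify region, offset and queue index are
 * hypothetical): a virtio-style doorbell.  A 2-byte guest write of value 0
 * to offset 0x10 then signals the EventNotifier directly in the kernel
 * instead of taking the slow MMIO-dispatch path.
 */
static void example_wire_doorbell(MemoryRegion *notify_mr, EventNotifier *e)
{
    event_notifier_init(e, 0);
    memory_region_add_eventfd(notify_mr, 0x10, 2, true, 0, e);
}
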
2367
2368void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002369 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002370 unsigned size,
2371 bool match_data,
2372 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002373 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002374{
2375 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002376 .addr.start = int128_make64(addr),
2377 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002378 .match_data = match_data,
2379 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002380 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002381 };
2382 unsigned i;
2383
Jason Wangb8aecea2015-11-06 16:02:45 +08002384 if (size) {
2385 adjust_endianness(mr, &mrfd.data, size);
2386 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002387 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002388 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002389 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002390 break;
2391 }
2392 }
2393 assert(i != mr->ioeventfd_nb);
2394 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2395 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2396 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002397 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002398 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002399 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002400 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002401}
2402
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002403static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002404{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002405 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002406 MemoryRegion *other;
2407
Jan Kiszka59023ef2012-08-23 13:02:30 +02002408 memory_region_transaction_begin();
2409
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002410 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002411 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002412 if (subregion->priority >= other->priority) {
2413 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2414 goto done;
2415 }
2416 }
2417 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2418done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002419 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002420 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002421}
2422
Peter Crosthwaite05987012014-06-05 23:14:44 -07002423static void memory_region_add_subregion_common(MemoryRegion *mr,
2424 hwaddr offset,
2425 MemoryRegion *subregion)
2426{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002427 assert(!subregion->container);
2428 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002429 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002430 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002431}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002432
2433void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002434 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002435 MemoryRegion *subregion)
2436{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002437 subregion->priority = 0;
2438 memory_region_add_subregion_common(mr, offset, subregion);
2439}
2440
2441void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002442 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002443 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002444 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002445{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002446 subregion->priority = priority;
2447 memory_region_add_subregion_common(mr, offset, subregion);
2448}
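
/*
 * Illustrative usage sketch (region names, addresses and the priority are
 * hypothetical): composing a board address map.  The higher-priority ROM
 * shadows whatever part of RAM it overlaps.
 */
static void example_build_address_map(MemoryRegion *sysmem, MemoryRegion *ram,
                                      MemoryRegion *rom)
{
    memory_region_add_subregion(sysmem, 0x00000000, ram);
    memory_region_add_subregion_overlap(sysmem, 0xfffc0000, rom, 1);
}
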

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}
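
/*
 * Usage sketch: when several of the setters above (enable state, size,
 * address) are changed together, callers can bracket them in an explicit
 * transaction so the flat views and ioeventfds are rebuilt only once on
 * the final commit; the inner begin/commit pairs simply nest.  The region
 * and values here are illustrative:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&s->bar, false);
 *     memory_region_set_address(&s->bar, new_base);
 *     memory_region_set_enabled(&s->bar, true);
 *     memory_region_transaction_commit();
 */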

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
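
/*
 * Usage sketch: memory_region_set_alias_offset() moves the window that an
 * alias exposes into its target region, e.g. for a banked ROM mapping.
 * The alias must have been created with memory_region_init_alias(); names
 * and sizes here are illustrative:
 *
 *     memory_region_init_alias(&s->bank, OBJECT(dev), "example.rom-bank",
 *                              &s->rom, 0, 0x4000);
 *     memory_region_add_subregion(get_system_memory(), 0xffff0000, &s->bank);
 *     ...
 *     memory_region_set_alias_offset(&s->bank, bank_nr * 0x4000);
 */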

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region. It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
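
/*
 * Usage sketch: memory_region_find() returns a referenced section, so the
 * caller drops the reference when done.  Looking up one byte at an
 * illustrative guest-physical address:
 *
 *     MemoryRegionSection section = memory_region_find(get_system_memory(),
 *                                                      0x40000000, 1);
 *     if (section.mr) {
 *         // ... inspect section.offset_within_region, section.size ...
 *         memory_region_unref(section.mr);
 *     }
 */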

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}

void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
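
/*
 * Usage sketch: dirty-tracking users such as migration bracket a tracking
 * phase with the two calls above; note that a stop requested while the VM
 * is paused is deferred, via the vm-change-state handler, until the VM
 * runs again:
 *
 *     memory_global_dirty_log_start();
 *     // ... iterate, calling memory_global_dirty_log_sync() as needed ...
 *     memory_global_dirty_log_stop();
 */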

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
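
/*
 * Usage sketch: a minimal MemoryListener that is told about every section
 * of an address space's flat view.  Registering it immediately replays the
 * current view through region_add; the callback body and priority are
 * illustrative:
 *
 *     static void example_region_add(MemoryListener *l,
 *                                    MemoryRegionSection *section)
 *     {
 *         // e.g. program an IOMMU or accelerator memory map here
 *     }
 *
 *     static MemoryListener example_listener = {
 *         .region_add = example_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&example_listener, &address_space_memory);
 */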

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
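
/*
 * Usage sketch: devices that master their own bus view (for example behind
 * an IOMMU) wrap a root region in a private AddressSpace; the root region
 * and names are illustrative:
 *
 *     memory_region_init(&s->dma_root, OBJECT(dev), "example.dma-root",
 *                        UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "example-dma");
 *     ...
 *     address_space_destroy(&s->dma_as);
 */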

static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use. Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        gchar *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}

static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        qemu_printf(MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                    "-" TARGET_FMT_plx "%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    memory_region_name(mr->alias),
                    mr->alias_offset,
                    mr->alias_offset + MR_SIZE(mr->size),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    } else {
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): %s%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    }
    qemu_printf("\n");

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
    const char *ac_name;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace*, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac_name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

void mtree_info(bool flatview, bool dispatch_tree, bool owner)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
        AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator);

        if (ac->has_memory) {
            fvi.ac = ac;
            fvi.ac_name = current_machine->accel ? current_machine->accel :
                object_class_get_name(OBJECT_CLASS(ac));
        }

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
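
/*
 * Usage sketch: this is the backend of the HMP "info mtree" command; the
 * flags map onto the arguments above (monitor syntax shown for
 * illustration):
 *
 *     (qemu) info mtree          # memory region tree per address space
 *     (qemu) info mtree -f -o    # rendered flat views, with owners
 *     (qemu) info mtree -f -d    # flat views plus the dispatch tree
 */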

void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
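
/*
 * Usage sketch: the migration-aware wrapper above is what device realize
 * functions normally call; passing the device as owner both names the RAM
 * block and registers it for migration.  The device type, field names and
 * size are illustrative:
 *
 *     static void example_device_realize(DeviceState *dev, Error **errp)
 *     {
 *         ExampleState *s = EXAMPLE_DEVICE(dev);
 *
 *         memory_region_init_ram(&s->ram, OBJECT(dev), "example.ram",
 *                                0x20000, errp);
 *     }
 */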

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

static const TypeInfo memory_region_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_MEMORY_REGION,
    .class_size = sizeof(MemoryRegionClass),
    .instance_size = sizeof(MemoryRegion),
    .instance_init = memory_region_initfn,
    .instance_finalize = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent = TYPE_MEMORY_REGION,
    .name = TYPE_IOMMU_MEMORY_REGION,
    .class_size = sizeof(IOMMUMemoryRegionClass),
    .instance_size = sizeof(IOMMUMemoryRegion),
    .instance_init = iommu_memory_region_initfn,
    .abstract = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)