/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

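/* Memory listeners are kept in a priority-sorted list and can be walked in
 * either direction.  Paired callbacks (e.g. region_add/region_del below) are
 * dispatched in opposite directions, so teardown is observed in the reverse
 * order of setup.
 */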
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for
 * maintaining sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

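/* FlatViews are reference counted and reclaimed through RCU: the final unref
 * defers flatview_destroy() until concurrent readers are done.  Note that
 * flatview_ref() can fail (return false) when it races with the count
 * reaching zero; a dying view cannot be resurrected.
 */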
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}

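/* Two adjacent FlatRanges can be merged only if they are contiguous both in
 * the address space and within the region, and agree on every attribute
 * (same region, dirty logging, ROMD mode, readonly).
 */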
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

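/* Byte-swap a value moving between a device and the bus when the device's
 * declared endianness differs from the target's; e.g. on a big-endian target
 * a 4-byte value from a DEVICE_LITTLE_ENDIAN region is bswap32()ed.
 */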
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

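/* The accessors below share one shape: perform a single device access of
 * `size' bytes at `addr', trace it, and merge the result into *value under
 * `mask' at `shift'.  access_with_adjusted_size() composes them to emulate
 * accesses wider or narrower than the device implements.
 */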
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

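/* Illustrative sketch: a 4-byte access to a device that only implements
 * 2-byte accesses is split into two accessor calls with
 * access_mask = 0xffff.  For a little-endian device the half at addr + 0
 * is merged at shift 0 and the half at addr + 2 at shift 16; for a
 * big-endian device the shifts are 16 and 0 respectively, giving each
 * half the significance the device's endianness dictates.
 */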
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

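/* Peel away aliases that are included in their entirety, and containers
 * whose only enabled child covers them from offset 0, to find the region
 * that actually determines the flat view.  Address spaces whose roots
 * resolve to the same region can then share one FlatView; NULL means the
 * view is empty.
 */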
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the
                         * only enabled one, use it in the hope of finding an
                         * alias further down.  This will also let us share
                         * FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

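/* Called twice per topology update: a first pass with adding == false
 * delivers region_del for ranges that go away, then a second pass with
 * adding == true delivers region_add/region_nop (plus log_start/log_stop)
 * for ranges that appear or survive.
 */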
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

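/* flat_views caches generated FlatViews keyed by their physical root
 * MemoryRegion.  The NULL key maps to a shared, permanently referenced
 * empty view used by address spaces whose root renders nothing.
 */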
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

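/* Topology changes can be batched in a transaction so that flat views,
 * dispatch trees and listeners are updated only once.  A typical caller
 * (sketch; memory_region_set_enabled()/set_address() are the usual
 * mutators) looks like:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr, false);
 *     memory_region_set_address(mr, new_addr);
 *     memory_region_set_enabled(mr, true);
 *     memory_region_transaction_commit();
 *
 * Transactions nest; the update runs when the depth drops back to zero.
 */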
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

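/* QOM path separators and brackets are not allowed in child property
 * names, so they are escaped as a "\xNN" hex sequence: e.g. "pci/mem"
 * becomes "pci\x2fmem".  Names that need no escaping are duplicated
 * unchanged.
 */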
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

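/* Accesses that hit no memory region at all end up here: reads return 0,
 * writes are ignored, and the current CPU (if any) is given a chance to
 * raise an exception via cpu_unassigned_access().
 */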
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

Alex Williamson4a2e2422016-10-31 09:53:03 -06001312static uint64_t memory_region_ram_device_read(void *opaque,
1313 hwaddr addr, unsigned size)
1314{
1315 MemoryRegion *mr = opaque;
1316 uint64_t data = (uint64_t)~0;
1317
1318 switch (size) {
1319 case 1:
1320 data = *(uint8_t *)(mr->ram_block->host + addr);
1321 break;
1322 case 2:
1323 data = *(uint16_t *)(mr->ram_block->host + addr);
1324 break;
1325 case 4:
1326 data = *(uint32_t *)(mr->ram_block->host + addr);
1327 break;
1328 case 8:
1329 data = *(uint64_t *)(mr->ram_block->host + addr);
1330 break;
1331 }
1332
1333 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1334
1335 return data;
1336}
1337
1338static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1339 uint64_t data, unsigned size)
1340{
1341 MemoryRegion *mr = opaque;
1342
1343 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1344
1345 switch (size) {
1346 case 1:
1347 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1348 break;
1349 case 2:
1350 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1351 break;
1352 case 4:
1353 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1354 break;
1355 case 8:
1356 *(uint64_t *)(mr->ram_block->host + addr) = data;
1357 break;
1358 }
1359}
1360
1361static const MemoryRegionOps ram_device_mem_ops = {
1362 .read = memory_region_ram_device_read,
1363 .write = memory_region_ram_device_write,
Yongji Xiec99a29e2017-02-27 12:52:44 +08001364 .endianness = DEVICE_HOST_ENDIAN,
Alex Williamson4a2e2422016-10-31 09:53:03 -06001365 .valid = {
1366 .min_access_size = 1,
1367 .max_access_size = 8,
1368 .unaligned = true,
1369 },
1370 .impl = {
1371 .min_access_size = 1,
1372 .max_access_size = 8,
1373 .unaligned = true,
1374 },
1375};
1376
Paolo Bonzinid2702032013-05-24 11:55:06 +02001377bool memory_region_access_valid(MemoryRegion *mr,
1378 hwaddr addr,
1379 unsigned size,
1380 bool is_write)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001381{
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001382 int access_size_min, access_size_max;
1383 int access_size, i;
Avi Kivity897fa7c2011-11-13 13:05:27 +02001384
Avi Kivity093bc2c2011-07-26 14:26:01 +03001385 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1386 return false;
1387 }
1388
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001389 if (!mr->ops->valid.accepts) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001390 return true;
1391 }
1392
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001393 access_size_min = mr->ops->valid.min_access_size;
1394 if (!mr->ops->valid.min_access_size) {
1395 access_size_min = 1;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001396 }
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001397
1398 access_size_max = mr->ops->valid.max_access_size;
1399 if (!mr->ops->valid.max_access_size) {
1400 access_size_max = 4;
1401 }
1402
1403 access_size = MAX(MIN(size, access_size_max), access_size_min);
1404 for (i = 0; i < size; i += access_size) {
1405 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
1406 is_write)) {
1407 return false;
1408 }
1409 }
1410
Avi Kivity093bc2c2011-07-26 14:26:01 +03001411 return true;
1412}
1413
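/* Illustrative sketch (not part of the upstream file): a hypothetical
 * MemoryRegionOps whose .valid fields exercise every check in
 * memory_region_access_valid() above.  With these settings a 1-byte
 * guest access is widened to the 2-byte minimum before .accepts is
 * consulted, and an access at an odd address fails the alignment
 * test.  All "demo_" names are invented for this example.
 */
static uint64_t demo_reg_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;
}

static void demo_reg_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
}

static bool demo_reg_accepts(void *opaque, hwaddr addr,
                             unsigned size, bool is_write)
{
    return addr + size <= 0x100;    /* only the first 256 bytes decode */
}

static const MemoryRegionOps demo_reg_ops = {
    .read = demo_reg_read,
    .write = demo_reg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 2,
        .max_access_size = 4,
        .unaligned = false,         /* addr & (size - 1) must be zero */
        .accepts = demo_reg_accepts,
    },
};
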
Peter Maydellcc05c432015-04-26 16:49:23 +01001414static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1415 hwaddr addr,
1416 uint64_t *pval,
1417 unsigned size,
1418 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001419{
Peter Maydellcc05c432015-04-26 16:49:23 +01001420 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001421
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001422 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001423 return access_with_adjusted_size(addr, pval, size,
1424 mr->ops->impl.min_access_size,
1425 mr->ops->impl.max_access_size,
1426 memory_region_read_accessor,
1427 mr, attrs);
1428 } else if (mr->ops->read_with_attrs) {
1429 return access_with_adjusted_size(addr, pval, size,
1430 mr->ops->impl.min_access_size,
1431 mr->ops->impl.max_access_size,
1432 memory_region_read_with_attrs_accessor,
1433 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001434 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001435 return access_with_adjusted_size(addr, pval, size, 1, 4,
1436 memory_region_oldmmio_read_accessor,
1437 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001438 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001439}
1440
Peter Maydell3b643492015-04-26 16:49:23 +01001441MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1442 hwaddr addr,
1443 uint64_t *pval,
1444 unsigned size,
1445 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001446{
Peter Maydellcc05c432015-04-26 16:49:23 +01001447 MemTxResult r;
1448
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001449 if (!memory_region_access_valid(mr, addr, size, false)) {
1450 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001451 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001452 }
Avi Kivitya621f382012-01-02 13:12:08 +02001453
Peter Maydellcc05c432015-04-26 16:49:23 +01001454 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001455 adjust_endianness(mr, pval, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001456 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001457}
1458
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001459/* Return true if an eventfd was signalled */
1460static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1461 hwaddr addr,
1462 uint64_t data,
1463 unsigned size,
1464 MemTxAttrs attrs)
1465{
1466 MemoryRegionIoeventfd ioeventfd = {
1467 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1468 .data = data,
1469 };
1470 unsigned i;
1471
1472 for (i = 0; i < mr->ioeventfd_nb; i++) {
1473 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1474 ioeventfd.e = mr->ioeventfds[i].e;
1475
1476 if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
1477 event_notifier_set(ioeventfd.e);
1478 return true;
1479 }
1480 }
1481
1482 return false;
1483}
1484
Peter Maydell3b643492015-04-26 16:49:23 +01001485MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1486 hwaddr addr,
1487 uint64_t data,
1488 unsigned size,
1489 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001490{
Avi Kivity897fa7c2011-11-13 13:05:27 +02001491 if (!memory_region_access_valid(mr, addr, size, true)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001492 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001493 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001494 }
1495
Avi Kivitya621f382012-01-02 13:12:08 +02001496 adjust_endianness(mr, &data, size);
1497
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001498 if ((!kvm_eventfds_enabled()) &&
1499 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1500 return MEMTX_OK;
1501 }
1502
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001503 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001504 return access_with_adjusted_size(addr, &data, size,
1505 mr->ops->impl.min_access_size,
1506 mr->ops->impl.max_access_size,
1507 memory_region_write_accessor, mr,
1508 attrs);
1509 } else if (mr->ops->write_with_attrs) {
1510 return
1511 access_with_adjusted_size(addr, &data, size,
1512 mr->ops->impl.min_access_size,
1513 mr->ops->impl.max_access_size,
1514 memory_region_write_with_attrs_accessor,
1515 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001516 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001517 return access_with_adjusted_size(addr, &data, size, 1, 4,
1518 memory_region_oldmmio_write_accessor,
1519 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001520 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001521}
1522
Avi Kivity093bc2c2011-07-26 14:26:01 +03001523void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001524 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001525 const MemoryRegionOps *ops,
1526 void *opaque,
1527 const char *name,
1528 uint64_t size)
1529{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001530 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001531 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001532 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001533 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001534}
1535
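/* Illustrative sketch: typical device-model usage of
 * memory_region_init_io(), reusing the hypothetical demo_reg_ops
 * defined earlier.  The region only becomes guest-visible once it is
 * added to a container; see memory_region_add_subregion() below.
 */
static void demo_device_init_mmio(MemoryRegion *mr, Object *owner,
                                  void *dev_state)
{
    memory_region_init_io(mr, owner, &demo_reg_ops, dev_state,
                          "demo-regs", 0x100);
}
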
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001536void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1537 Object *owner,
1538 const char *name,
1539 uint64_t size,
1540 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001541{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001542 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001543 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001544 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001545 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001546 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001547 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001548}
1549
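/* Illustrative sketch: "_nomigrate" means migration of the RAM
 * contents is the caller's problem.  A device that wants the block
 * migrated normally pairs the call with vmstate_register_ram(), which
 * assigns the block a stable migration idstr.  The name and size here
 * are arbitrary.
 */
static void demo_device_init_ram(MemoryRegion *mr, Object *owner,
                                 DeviceState *dev)
{
    memory_region_init_ram_nomigrate(mr, owner, "demo-ram",
                                     64 * 1024, &error_fatal);
    vmstate_register_ram(mr, dev);
}
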
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001550void memory_region_init_resizeable_ram(MemoryRegion *mr,
1551 Object *owner,
1552 const char *name,
1553 uint64_t size,
1554 uint64_t max_size,
1555 void (*resized)(const char*,
1556 uint64_t length,
1557 void *host),
1558 Error **errp)
1559{
1560 memory_region_init(mr, owner, name, size);
1561 mr->ram = true;
1562 mr->terminates = true;
1563 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001564 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1565 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001566 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001567}
1568
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001569#ifdef __linux__
1570void memory_region_init_ram_from_file(MemoryRegion *mr,
1571 struct Object *owner,
1572 const char *name,
1573 uint64_t size,
Haozhong Zhang98376842017-12-11 15:28:04 +08001574 uint64_t align,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001575 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001576 const char *path,
1577 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001578{
1579 memory_region_init(mr, owner, name, size);
1580 mr->ram = true;
1581 mr->terminates = true;
1582 mr->destructor = memory_region_destructor_ram;
Haozhong Zhang98376842017-12-11 15:28:04 +08001583 mr->align = align;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001584 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001585 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001586}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001587
1588void memory_region_init_ram_from_fd(MemoryRegion *mr,
1589 struct Object *owner,
1590 const char *name,
1591 uint64_t size,
1592 bool share,
1593 int fd,
1594 Error **errp)
1595{
1596 memory_region_init(mr, owner, name, size);
1597 mr->ram = true;
1598 mr->terminates = true;
1599 mr->destructor = memory_region_destructor_ram;
1600 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1601 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1602}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001603#endif
1604
Avi Kivity093bc2c2011-07-26 14:26:01 +03001605void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001606 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001607 const char *name,
1608 uint64_t size,
1609 void *ptr)
1610{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001611 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001612 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001613 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001614 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001615 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001616
1617 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1618 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001619 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001620}
1621
Alex Williamson21e00fa2016-10-31 09:53:03 -06001622void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1623 Object *owner,
1624 const char *name,
1625 uint64_t size,
1626 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301627{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001628 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1629 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001630 mr->ops = &ram_device_mem_ops;
1631 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301632}
1633
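/* Illustrative sketch: a VFIO-style caller exposing a host-mmapped
 * device BAR to the guest.  Marking the region as a ram_device means
 * slow-path accesses are dispatched through ram_device_mem_ops above
 * with the exact guest access size instead of being memcpy'd like
 * ordinary RAM.  The mmap pointer and size are assumed to come from
 * the caller.
 */
static void demo_map_bar(MemoryRegion *mr, Object *owner,
                         void *bar_mmap, uint64_t bar_size)
{
    memory_region_init_ram_device_ptr(mr, owner, "demo-bar",
                                      bar_size, bar_mmap);
}
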
Avi Kivity093bc2c2011-07-26 14:26:01 +03001634void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001635 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001636 const char *name,
1637 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001638 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001639 uint64_t size)
1640{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001641 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001642 mr->alias = orig;
1643 mr->alias_offset = offset;
1644}
1645
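/* Illustrative sketch: an alias makes a window of another region
 * visible at a second guest-physical address without copying it, in
 * the style of the classic low-memory mappings on PC.  The offset and
 * size are invented for the example.
 */
static void demo_alias_low_ram(MemoryRegion *alias, Object *owner,
                               MemoryRegion *ram)
{
    /* expose the first 1 MiB of "ram" through "alias" */
    memory_region_init_alias(alias, owner, "demo-lowmem", ram,
                             0, 0x100000);
}
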
Peter Maydellb59821a2017-07-07 15:42:50 +01001646void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1647 struct Object *owner,
1648 const char *name,
1649 uint64_t size,
1650 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001651{
1652 memory_region_init(mr, owner, name, size);
1653 mr->ram = true;
1654 mr->readonly = true;
1655 mr->terminates = true;
1656 mr->destructor = memory_region_destructor_ram;
1657 mr->ram_block = qemu_ram_alloc(size, mr, errp);
1658 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1659}
1660
Peter Maydellb59821a2017-07-07 15:42:50 +01001661void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1662 Object *owner,
1663 const MemoryRegionOps *ops,
1664 void *opaque,
1665 const char *name,
1666 uint64_t size,
1667 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001668{
Peter Maydell39e0b032016-07-04 13:06:35 +01001669 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001670 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001671 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001672 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001673 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001674 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001675 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001676 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001677}
1678
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001679void memory_region_init_iommu(void *_iommu_mr,
1680 size_t instance_size,
1681 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001682 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001683 const char *name,
1684 uint64_t size)
1685{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001686 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001687 struct MemoryRegion *mr;
1688
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001689 object_initialize(_iommu_mr, instance_size, mrtypename);
1690 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001691 memory_region_do_init(mr, owner, name, size);
1692 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001693 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001694 QLIST_INIT(&iommu_mr->iommu_notify);
1695 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001696}
1697
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001698static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001699{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001700 MemoryRegion *mr = MEMORY_REGION(obj);
1701
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001702 assert(!mr->container);
1703
1704 /* We know the region is not visible in any address space (it
1705 * does not have a container and cannot be a root either because
1706     * it has no references), so we can blindly clear mr->enabled.
1707 * memory_region_set_enabled instead could trigger a transaction
1708 * and cause an infinite loop.
1709 */
1710 mr->enabled = false;
1711 memory_region_transaction_begin();
1712 while (!QTAILQ_EMPTY(&mr->subregions)) {
1713 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1714 memory_region_del_subregion(mr, subregion);
1715 }
1716 memory_region_transaction_commit();
1717
Avi Kivity545e92e2011-08-08 19:58:48 +03001718 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001719 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001720 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001721 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001722}
1723
Paolo Bonzini803c0812013-05-07 06:59:09 +02001724Object *memory_region_owner(MemoryRegion *mr)
1725{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001726 Object *obj = OBJECT(mr);
1727 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001728}
1729
Paolo Bonzini46637be2013-05-07 09:06:00 +02001730void memory_region_ref(MemoryRegion *mr)
1731{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001732 /* MMIO callbacks most likely will access data that belongs
1733 * to the owner, hence the need to ref/unref the owner whenever
1734 * the memory region is in use.
1735 *
1736 * The memory region is a child of its owner. As long as the
1737 * owner doesn't call unparent itself on the memory region,
1738 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001739 * Memory regions without an owner are supposed to never go away;
1740     * we do not ref/unref them because doing so slows down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001741 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001742 if (mr && mr->owner) {
1743 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001744 }
1745}
1746
1747void memory_region_unref(MemoryRegion *mr)
1748{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001749 if (mr && mr->owner) {
1750 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001751 }
1752}
1753
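/* Illustrative sketch of the discipline described above: a caller that
 * stashes a region for deferred use (a bottom half, a DMA completion)
 * pins the owner first and drops the pin when done.  "demo_" names
 * are invented.
 */
static MemoryRegion *demo_pin_region(MemoryRegion *mr)
{
    memory_region_ref(mr);   /* actually refs mr->owner, if it has one */
    return mr;
}

static void demo_unpin_region(MemoryRegion *mr)
{
    memory_region_unref(mr);
}
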
Avi Kivity093bc2c2011-07-26 14:26:01 +03001754uint64_t memory_region_size(MemoryRegion *mr)
1755{
Avi Kivity08dafab2011-10-16 13:19:17 +02001756 if (int128_eq(mr->size, int128_2_64())) {
1757 return UINT64_MAX;
1758 }
1759 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001760}
1761
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001762const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001763{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001764 if (!mr->name) {
1765 ((MemoryRegion *)mr)->name =
1766 object_get_canonical_path_component(OBJECT(mr));
1767 }
Peter Maydell302fa282014-08-19 20:05:46 +01001768 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001769}
1770
Alex Williamson21e00fa2016-10-31 09:53:03 -06001771bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301772{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001773 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301774}
1775
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001776uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001777{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001778 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001779 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001780 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1781 }
1782 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001783}
1784
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001785bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1786{
1787 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1788}
1789
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001790static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001791{
1792 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1793 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001794 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001795
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001796 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001797 flags |= iommu_notifier->notifier_flags;
1798 }
1799
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001800 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1801 imrc->notify_flag_changed(iommu_mr,
1802 iommu_mr->iommu_notify_flags,
1803 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001804 }
1805
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001806 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001807}
1808
Peter Xucdb30812016-09-23 13:02:26 +08001809void memory_region_register_iommu_notifier(MemoryRegion *mr,
1810 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001811{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001812 IOMMUMemoryRegion *iommu_mr;
1813
Jason Wangefcd38c2016-12-30 18:09:17 +08001814 if (mr->alias) {
1815 memory_region_register_iommu_notifier(mr->alias, n);
1816 return;
1817 }
1818
Peter Xucdb30812016-09-23 13:02:26 +08001819 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001820 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001821 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001822 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001823 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1824 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001825}
1826
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001827uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001828{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001829 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1830
1831 if (imrc->get_min_page_size) {
1832 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001833 }
1834 return TARGET_PAGE_SIZE;
1835}
1836
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001837void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001838{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001839 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001840 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001841 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001842 IOMMUTLBEntry iotlb;
1843
Peter Xufaa362e2017-04-07 18:59:11 +08001844 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001845 if (imrc->replay) {
1846 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001847 return;
1848 }
1849
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001850 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001851
David Gibsona788f222015-09-30 12:13:55 +10001852 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001853 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001854 if (iotlb.perm != IOMMU_NONE) {
1855 n->notify(n, &iotlb);
1856 }
1857
1858         /* If (2^64 - MR size) < granularity, addr can wrap around and the
1859          * loop would never terminate.  Catch such a wraparound here. */
1860 if ((addr + granularity) < addr) {
1861 break;
1862 }
1863 }
1864}
1865
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001866void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001867{
1868 IOMMUNotifier *notifier;
1869
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001870 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1871 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001872 }
1873}
1874
Peter Xucdb30812016-09-23 13:02:26 +08001875void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1876 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001877{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001878 IOMMUMemoryRegion *iommu_mr;
1879
Jason Wangefcd38c2016-12-30 18:09:17 +08001880 if (mr->alias) {
1881 memory_region_unregister_iommu_notifier(mr->alias, n);
1882 return;
1883 }
Peter Xucdb30812016-09-23 13:02:26 +08001884 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001885 iommu_mr = IOMMU_MEMORY_REGION(mr);
1886 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001887}
1888
Peter Xubd2bfa42017-04-07 18:59:10 +08001889void memory_region_notify_one(IOMMUNotifier *notifier,
1890 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001891{
Peter Xucdb30812016-09-23 13:02:26 +08001892 IOMMUNotifierFlag request_flags;
1893
Peter Xubd2bfa42017-04-07 18:59:10 +08001894 /*
1895     * Skip the notification if it does not overlap with the
1896     * registered range.
1897 */
Maxime Coquelinb021d1c2017-10-10 11:42:47 +02001898 if (notifier->start > entry->iova + entry->addr_mask ||
Peter Xubd2bfa42017-04-07 18:59:10 +08001899 notifier->end < entry->iova) {
1900 return;
1901 }
Peter Xucdb30812016-09-23 13:02:26 +08001902
Peter Xubd2bfa42017-04-07 18:59:10 +08001903 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001904 request_flags = IOMMU_NOTIFIER_MAP;
1905 } else {
1906 request_flags = IOMMU_NOTIFIER_UNMAP;
1907 }
1908
Peter Xubd2bfa42017-04-07 18:59:10 +08001909 if (notifier->notifier_flags & request_flags) {
1910 notifier->notify(notifier, entry);
1911 }
1912}
1913
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001914void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001915 IOMMUTLBEntry entry)
1916{
1917 IOMMUNotifier *iommu_notifier;
1918
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001919 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001920
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001921 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001922 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001923 }
David Gibson06866572013-05-14 19:13:56 +10001924}
1925
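/* Illustrative sketch: what an IOMMU model's map path might feed into
 * memory_region_notify_iommu() after updating its translation tables.
 * addr_mask must describe a naturally aligned power-of-two range; all
 * field values below are invented.
 */
static void demo_iommu_map(IOMMUMemoryRegion *iommu_mr,
                           hwaddr iova, hwaddr pa, hwaddr len)
{
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = iova,
        .translated_addr = pa,
        .addr_mask = len - 1,      /* len assumed to be a power of two */
        .perm = IOMMU_RW,
    };

    memory_region_notify_iommu(iommu_mr, entry);
}
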
Avi Kivity093bc2c2011-07-26 14:26:01 +03001926void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1927{
Avi Kivity5a583342011-07-26 14:26:02 +03001928 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001929 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001930
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001931 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001932 old_logging = mr->vga_logging_count;
1933 mr->vga_logging_count += log ? 1 : -1;
1934 if (!!old_logging == !!mr->vga_logging_count) {
1935 return;
1936 }
1937
Jan Kiszka59023ef2012-08-23 13:02:30 +02001938 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001939 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001940 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001941 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001942}
1943
Avi Kivitya8170e52012-10-23 12:30:10 +02001944bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1945 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001946{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001947 assert(mr->ram_block);
1948 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1949 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001950}
1951
Avi Kivitya8170e52012-10-23 12:30:10 +02001952void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1953 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001954{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001955 assert(mr->ram_block);
1956 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1957 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001958 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001959}
1960
Juan Quintela6c279db2012-10-17 20:24:28 +02001961bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1962 hwaddr size, unsigned client)
1963{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001964 assert(mr->ram_block);
1965 return cpu_physical_memory_test_and_clear_dirty(
1966 memory_region_get_ram_addr(mr) + addr, size, client);
Juan Quintela6c279db2012-10-17 20:24:28 +02001967}
1968
Gerd Hoffmann8deaf122017-04-21 11:16:25 +02001969DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1970 hwaddr addr,
1971 hwaddr size,
1972 unsigned client)
1973{
1974 assert(mr->ram_block);
1975 return cpu_physical_memory_snapshot_and_clear_dirty(
1976 memory_region_get_ram_addr(mr) + addr, size, client);
1977}
1978
1979bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1980 hwaddr addr, hwaddr size)
1981{
1982 assert(mr->ram_block);
1983 return cpu_physical_memory_snapshot_get_dirty(snap,
1984 memory_region_get_ram_addr(mr) + addr, size);
1985}
Juan Quintela6c279db2012-10-17 20:24:28 +02001986
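/* Illustrative sketch: the snapshot API above as a display adapter
 * would use it -- one grab-and-clear, then dirty queries on sub-ranges
 * of the snapshot.  Assumes VGA logging was enabled earlier with
 * memory_region_set_log(vram, true, DIRTY_MEMORY_VGA); the 4 KiB
 * chunking is arbitrary.
 */
static void demo_refresh_display(MemoryRegion *vram, hwaddr vram_size)
{
    DirtyBitmapSnapshot *snap;
    hwaddr off;

    snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
                                                  DIRTY_MEMORY_VGA);
    for (off = 0; off < vram_size; off += 4096) {
        if (memory_region_snapshot_get_dirty(vram, snap, off, 4096)) {
            /* redraw the scanlines backed by [off, off + 4096) */
        }
    }
    g_free(snap);
}
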
Avi Kivity093bc2c2011-07-26 14:26:01 +03001987void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1988{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001989 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001990 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001991 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001992 FlatRange *fr;
1993
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001994 /* If the same address space has multiple log_sync listeners, we
1995 * visit that address space's FlatView multiple times. But because
1996 * log_sync listeners are rare, it's still cheaper than walking each
1997 * address space once.
1998 */
1999 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2000 if (!listener->log_sync) {
2001 continue;
2002 }
2003 as = listener->address_space;
2004 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002005 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity0d673e32012-10-02 15:28:50 +02002006 if (fr->mr == mr) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002007 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002008 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02002009 }
Avi Kivity5a583342011-07-26 14:26:02 +03002010 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002011 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03002012 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002013}
2014
2015void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2016{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002017 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002018 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002019 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01002020 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002021 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002022 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002023}
2024
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002025void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002026{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002027 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002028 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002029 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01002030 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002031 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002032 }
2033}
2034
Avi Kivitya8170e52012-10-23 12:30:10 +02002035void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2036 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002037{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002038 assert(mr->ram_block);
2039 cpu_physical_memory_test_and_clear_dirty(
2040 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002041}
2042
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002043int memory_region_get_fd(MemoryRegion *mr)
2044{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002045 int fd;
2046
2047 rcu_read_lock();
2048 while (mr->alias) {
2049 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002050 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002051 fd = mr->ram_block->fd;
2052 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002053
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002054 return fd;
2055}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002056
Avi Kivity093bc2c2011-07-26 14:26:01 +03002057void *memory_region_get_ram_ptr(MemoryRegion *mr)
2058{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002059 void *ptr;
2060 uint64_t offset = 0;
2061
2062 rcu_read_lock();
2063 while (mr->alias) {
2064 offset += mr->alias_offset;
2065 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002066 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002067 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002068 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002069 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002070
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002071 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002072}
2073
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002074MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2075{
2076 RAMBlock *block;
2077
2078 block = qemu_ram_block_from_host(ptr, false, offset);
2079 if (!block) {
2080 return NULL;
2081 }
2082
2083 return block->mr;
2084}
2085
Fam Zheng7ebb2742016-03-01 14:18:20 +08002086ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2087{
2088 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2089}
2090
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002091void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2092{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002093 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002094
Gongleifa53a0e2016-05-10 10:04:59 +08002095 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002096}
2097
Avi Kivity0d673e32012-10-02 15:28:50 +02002098static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002099{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002100 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002101 FlatRange *fr;
2102 CoalescedMemoryRange *cmr;
2103 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02002104 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002105
Paolo Bonzini856d7242013-05-06 11:57:21 +02002106 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002107 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002108 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02002109 section = (MemoryRegionSection) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002110 .fv = view,
Avi Kivity95d29942012-10-02 18:21:54 +02002111 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002112 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02002113 };
2114
Paolo Bonzini9a546352016-09-22 16:23:06 +02002115 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002116 int128_get64(fr->addr.start),
2117 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002118 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2119 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02002120 int128_sub(fr->addr.start,
2121 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002122 if (!addrrange_intersects(tmp, fr->addr)) {
2123 continue;
2124 }
2125 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002126 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002127 int128_get64(tmp.start),
2128 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002129 }
2130 }
2131 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002132 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002133}
2134
Avi Kivity0d673e32012-10-02 15:28:50 +02002135static void memory_region_update_coalesced_range(MemoryRegion *mr)
2136{
2137 AddressSpace *as;
2138
2139 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2140 memory_region_update_coalesced_range_as(mr, as);
2141 }
2142}
2143
Avi Kivity093bc2c2011-07-26 14:26:01 +03002144void memory_region_set_coalescing(MemoryRegion *mr)
2145{
2146 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002147 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002148}
2149
2150void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002151 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002152 uint64_t size)
2153{
Anthony Liguori7267c092011-08-20 22:09:37 -05002154 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002155
Avi Kivity08dafab2011-10-16 13:19:17 +02002156 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002157 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2158 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02002159 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002160}
2161
2162void memory_region_clear_coalescing(MemoryRegion *mr)
2163{
2164 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002165 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002166
Jan Kiszkad4105152012-08-23 13:02:29 +02002167 qemu_flush_coalesced_mmio_buffer();
2168 mr->flush_coalesced_mmio = false;
2169
Avi Kivity093bc2c2011-07-26 14:26:01 +03002170 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2171 cmr = QTAILQ_FIRST(&mr->coalesced);
2172 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002173 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002174 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002175 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002176
2177 if (updated) {
2178 memory_region_update_coalesced_range(mr);
2179 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002180}
2181
Jan Kiszkad4105152012-08-23 13:02:29 +02002182void memory_region_set_flush_coalesced(MemoryRegion *mr)
2183{
2184 mr->flush_coalesced_mmio = true;
2185}
2186
2187void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2188{
2189 qemu_flush_coalesced_mmio_buffer();
2190 if (QTAILQ_EMPTY(&mr->coalesced)) {
2191 mr->flush_coalesced_mmio = false;
2192 }
2193}
2194
Jan Kiszka196ea132015-06-18 18:47:20 +02002195void memory_region_clear_global_locking(MemoryRegion *mr)
2196{
2197 mr->global_locking = false;
2198}
2199
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002200static bool userspace_eventfd_warning;
2201
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002202void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002203 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002204 unsigned size,
2205 bool match_data,
2206 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002207 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002208{
2209 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002210 .addr.start = int128_make64(addr),
2211 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002212 .match_data = match_data,
2213 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002214 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002215 };
2216 unsigned i;
2217
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002218 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2219 userspace_eventfd_warning))) {
2220 userspace_eventfd_warning = true;
2221 error_report("Using eventfd without MMIO binding in KVM. "
2222 "Suboptimal performance expected");
2223 }
2224
Jason Wangb8aecea2015-11-06 16:02:45 +08002225 if (size) {
2226 adjust_endianness(mr, &mrfd.data, size);
2227 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002228 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002229 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2230 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2231 break;
2232 }
2233 }
2234 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002235 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002236 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2237 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2238 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2239 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002240 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002241 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002242}
2243
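/* Illustrative sketch: turning a 32-bit doorbell register into an
 * ioeventfd.  After this, a guest write of exactly 1 to offset 0x40
 * of "mr" signals the notifier (with KVM, without a vmexit into the
 * device model); any other write still reaches the region's ->write
 * callback.  Offset and value are invented; error handling omitted.
 */
static void demo_arm_doorbell(MemoryRegion *mr, EventNotifier *e)
{
    event_notifier_init(e, 0);
    memory_region_add_eventfd(mr, 0x40, 4, true, 1, e);
}
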
2244void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002245 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002246 unsigned size,
2247 bool match_data,
2248 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002249 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002250{
2251 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002252 .addr.start = int128_make64(addr),
2253 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002254 .match_data = match_data,
2255 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002256 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002257 };
2258 unsigned i;
2259
Jason Wangb8aecea2015-11-06 16:02:45 +08002260 if (size) {
2261 adjust_endianness(mr, &mrfd.data, size);
2262 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002263 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002264 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2265 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2266 break;
2267 }
2268 }
2269 assert(i != mr->ioeventfd_nb);
2270 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2271 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2272 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002273 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002274 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002275 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002276 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002277}
2278
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002279static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002280{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002281 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002282 MemoryRegion *other;
2283
Jan Kiszka59023ef2012-08-23 13:02:30 +02002284 memory_region_transaction_begin();
2285
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002286 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002287 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002288 if (subregion->priority >= other->priority) {
2289 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2290 goto done;
2291 }
2292 }
2293 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2294done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002295 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002296 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002297}
2298
Peter Crosthwaite05987012014-06-05 23:14:44 -07002299static void memory_region_add_subregion_common(MemoryRegion *mr,
2300 hwaddr offset,
2301 MemoryRegion *subregion)
2302{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002303 assert(!subregion->container);
2304 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002305 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002306 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002307}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002308
2309void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002310 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002311 MemoryRegion *subregion)
2312{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002313 subregion->priority = 0;
2314 memory_region_add_subregion_common(mr, offset, subregion);
2315}
2316
2317void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002318 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002319 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002320 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002321{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002322 subregion->priority = priority;
2323 memory_region_add_subregion_common(mr, offset, subregion);
2324}
2325
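/* Illustrative sketch: the usual layering pattern.  RAM covers the
 * container at the default priority 0, and a small MMIO region is
 * overlaid with priority 1, so the MMIO region wins where the two
 * overlap and RAM shows through everywhere else.  The addresses are
 * invented.
 */
static void demo_layer_regions(MemoryRegion *container,
                               MemoryRegion *ram, MemoryRegion *mmio)
{
    memory_region_add_subregion(container, 0, ram);
    memory_region_add_subregion_overlap(container, 0xfee00000, mmio, 1);
}
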
2326void memory_region_del_subregion(MemoryRegion *mr,
2327 MemoryRegion *subregion)
2328{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002329 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002330 assert(subregion->container == mr);
2331 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002332 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002333 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002334 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002335 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002336}
2337
2338void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2339{
2340 if (enabled == mr->enabled) {
2341 return;
2342 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002343 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002344 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002345 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002346 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002347}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002348
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002349void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2350{
2351 Int128 s = int128_make64(size);
2352
2353 if (size == UINT64_MAX) {
2354 s = int128_2_64();
2355 }
2356 if (int128_eq(s, mr->size)) {
2357 return;
2358 }
2359 memory_region_transaction_begin();
2360 mr->size = s;
2361 memory_region_update_pending = true;
2362 memory_region_transaction_commit();
2363}
2364
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002365static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002366{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002367 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002368
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002369 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002370 memory_region_transaction_begin();
2371 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002372 memory_region_del_subregion(container, mr);
2373 mr->container = container;
2374 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002375 memory_region_unref(mr);
2376 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002377 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002378}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002379
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002380void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2381{
2382 if (addr != mr->addr) {
2383 mr->addr = addr;
2384 memory_region_readd_subregion(mr);
2385 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002386}
2387
Avi Kivitya8170e52012-10-23 12:30:10 +02002388void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002389{
Avi Kivity47033592011-12-04 19:16:50 +02002390 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002391
Jan Kiszka59023ef2012-08-23 13:02:30 +02002392 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002393 return;
2394 }
2395
Jan Kiszka59023ef2012-08-23 13:02:30 +02002396 memory_region_transaction_begin();
2397 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002398 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002399 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002400}
2401
Igor Mammedova2b257d2014-10-31 16:38:37 +00002402uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2403{
2404 return mr->align;
2405}
2406
Avi Kivitye2177952011-12-08 15:00:18 +02002407static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2408{
2409 const AddrRange *addr = addr_;
2410 const FlatRange *fr = fr_;
2411
2412 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2413 return -1;
2414 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2415 return 1;
2416 }
2417 return 0;
2418}
2419
Paolo Bonzini99e86342013-05-06 10:26:13 +02002420static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002421{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002422 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002423 sizeof(FlatRange), cmp_flatrange_addr);
2424}
2425
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002426bool memory_region_is_mapped(MemoryRegion *mr)
2427{
2428 return mr->container ? true : false;
2429}
2430
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002431/* Same as memory_region_find, but it does not add a reference to the
2432 * returned region. It must be called from an RCU critical section.
2433 */
2434static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2435 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002436{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002437 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002438 MemoryRegion *root;
2439 AddressSpace *as;
2440 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002441 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002442 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002443
Paolo Bonzini73034e92013-05-07 15:48:28 +02002444 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002445 for (root = mr; root->container; ) {
2446 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002447 addr += root->addr;
2448 }
2449
2450 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002451 if (!as) {
2452 return ret;
2453 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002454 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002455
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002456 view = address_space_to_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002457 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002458 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002459 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002460 }
2461
Paolo Bonzini99e86342013-05-06 10:26:13 +02002462 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002463 --fr;
2464 }
2465
2466 ret.mr = fr->mr;
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002467 ret.fv = view;
Avi Kivitye2177952011-12-08 15:00:18 +02002468 range = addrrange_intersection(range, fr->addr);
2469 ret.offset_within_region = fr->offset_in_region;
2470 ret.offset_within_region += int128_get64(int128_sub(range.start,
2471 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002472 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002473 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002474 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002475 return ret;
2476}
2477
2478MemoryRegionSection memory_region_find(MemoryRegion *mr,
2479 hwaddr addr, uint64_t size)
2480{
2481 MemoryRegionSection ret;
2482 rcu_read_lock();
2483 ret = memory_region_find_rcu(mr, addr, size);
2484 if (ret.mr) {
2485 memory_region_ref(ret.mr);
2486 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002487 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002488 return ret;
2489}
2490
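/* Illustrative sketch: memory_region_find() hands the caller a
 * reference on ret.mr, which must be dropped with
 * memory_region_unref(); contrast memory_region_find_rcu() above,
 * which takes no reference and must therefore stay inside the RCU
 * critical section.
 */
static bool demo_addr_is_ram(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(root, addr, 1);
    bool ret = sec.mr && memory_region_is_ram(sec.mr);

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return ret;
}
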
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002491bool memory_region_present(MemoryRegion *container, hwaddr addr)
2492{
2493 MemoryRegion *mr;
2494
2495 rcu_read_lock();
2496 mr = memory_region_find_rcu(container, addr, 1).mr;
2497 rcu_read_unlock();
2498 return mr && mr != container;
2499}

void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);

                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
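
/*
 * Illustrative sketch of the global dirty-log life cycle as driven by a
 * migration-style client (hypothetical caller, error handling omitted).
 * Note that a stop requested while the VM is not running is deferred to
 * the VM-state change handler above:
 *
 *     memory_global_dirty_log_start();     ... log_global_start callbacks run
 *     while (migrating) {
 *         memory_global_dirty_log_sync();  ... pull dirty bits via log_sync
 *         ... transfer and clear dirty pages ...
 *     }
 *     memory_global_dirty_log_stop();      ... immediate, or deferred
 */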

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}
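
/*
 * Illustrative sketch: a minimal listener watching one address space.
 * Registration replays the current topology through region_add, so the
 * callback fires once per existing section.  The callback name and the
 * priority value are arbitrary examples, not taken from this file:
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         ... react to the new section ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *     ...
 *     memory_listener_unregister(&my_listener);
 */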

bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid a topology update if the request_ptr callback itself calls
     * memory_region_invalidate_mmio_ptr(), which is likely when a cache
     * is in use.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}

typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

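/*
 * Invalidation requests are staged in a small fixed pool so that the
 * common case does not allocate; when every slot is busy, a
 * heap-allocated entry is used instead and freed once the flush has run.
 */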
#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset the dirty bits so this doesn't happen again later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find() added a reference on section.mr. */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface; just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->current_map is a dummy entry that the guest
     * should never use.  Wait for the old value to expire before
     * freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
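
/*
 * Illustrative sketch: a device-local address space built around a root
 * region (the names here are hypothetical).  The root is referenced by
 * address_space_init() and released again after address_space_destroy():
 *
 *     AddressSpace as;
 *     MemoryRegion root;
 *
 *     memory_region_init(&root, OBJECT(dev), "my-root", UINT64_MAX);
 *     address_space_init(&as, &root, "my-as");
 *     ...
 *     address_space_destroy(&as);
 */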

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of the memory region.  This should never
     * happen normally; if it does, print a marker to warn whoever is
     * observing the output.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    fprintf_function mon_printf;
    void *f;
    int counter;
    bool dispatch_tree;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    fprintf_function p = fvi->mon_printf;
    void *f = fvi->f;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    p(f, "FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
        if (as->root->alias) {
            p(f, ", alias %s", memory_region_name(as->root->alias));
        }
        p(f, "\n");
    }

    p(f, " Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(p, f, view->dispatch, view->root);
    }
#endif

    p(f, "\n");
}
3035
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003036static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3037 gpointer user_data)
3038{
3039 FlatView *view = key;
3040 GArray *fv_address_spaces = value;
3041
3042 g_array_unref(fv_address_spaces);
3043 flatview_unref(view);
3044
3045 return true;
3046}

void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
                bool dispatch_tree)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .mon_printf = mon_printf,
            .f = f,
            .counter = 0,
            .dispatch_tree = dispatch_tree
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
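
/*
 * Note: this function backs the HMP monitor command "info mtree"; at the
 * time of writing, the flatview and dispatch_tree arguments appear to
 * correspond to its "-f" and "-d" flags, e.g.:
 *
 *     (qemu) info mtree
 *     (qemu) info mtree -f -d
 */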

void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
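
/*
 * Illustrative sketch: typical use from a device realize function (the
 * device, field names and sizes are hypothetical).  Passing the device
 * as owner is what gives the RAM block a stable name for migration:
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            0x10000, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000,
 *                                 &s->ram);
 */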

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)