/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

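/* The intersection is [max(start1, start2), min(end1, end2)).  Illustrative
 * example (not from the original source): intersecting [0x1000, 0x4000)
 * with [0x2000, 0x6000) yields start = 0x2000 and end = 0x4000, i.e. an
 * AddrRange with start 0x2000 and size 0x2000.
 */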
static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

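/* Ordering predicate for ioeventfds: compare by start address, then size,
 * then match_data, then (when matching) data, and finally by the
 * EventNotifier pointer.  The add/del walk further down relies on this
 * ordering to treat the fd arrays as sorted sets.
 */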
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

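/* FlatViews are reference counted; the final unref defers the actual
 * destruction to an RCU callback, so readers that are still walking the
 * old view under rcu_read_lock() stay safe.
 */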
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}

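/* Two flat ranges can be merged when they are physically adjacent, map the
 * same MemoryRegion at consecutive offsets, and agree on dirty logging,
 * ROM-device mode and read-only attributes.
 */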
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

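/* Byte-swap a 2-, 4- or 8-byte value when the endianness declared in
 * MemoryRegionOps does not match the target endianness; single-byte
 * accesses need no adjustment.
 */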
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            uint64_t *value,
                                                            unsigned size,
                                                            unsigned shift,
                                                            uint64_t mask,
                                                            MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

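/* Split an access into pieces the device implementation can handle and
 * recombine the result with shift/mask.  Illustrative example (not from the
 * original source): an 8-byte little-endian read on a device whose impl
 * limits accesses to 4 bytes becomes two 4-byte reads merged at shifts 0
 * and 32.
 */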
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   unsigned shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->alias && !mr->alias_offset &&
           int128_ge(mr->size, mr->alias->size)) {
        /* The alias is included in its entirety.  Use it as
         * the "real" root, so that we can share more FlatViews.
         */
        mr = mr->alias;
    }

    return mr;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

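/* flat_views maps a canonical root MemoryRegion (see
 * memory_region_get_flatview_root) to its rendered FlatView, so address
 * spaces that share the same effective root also share one FlatView.
 */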
static void flatviews_init(void)
{
    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

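/* Transactions batch memory map changes: begin/commit may nest, and the
 * flat views, listeners and ioeventfds are only brought up to date when
 * the outermost commit drops memory_region_transaction_depth back to zero.
 */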
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

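/* Escape the characters listed in memory_region_need_escape() as "\xNN" so
 * the region name can safely be used as a QOM child property name; names
 * that need no escaping are returned as a plain copy.
 */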
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                                const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

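/* ram_device regions are backed by host memory, but reads and writes go
 * through the explicit, size-matched loads and stores below instead of a
 * plain memcpy; see ram_device_mem_ops.
 */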
Alex Williamson4a2e2422016-10-31 09:53:03 -06001269static uint64_t memory_region_ram_device_read(void *opaque,
1270 hwaddr addr, unsigned size)
1271{
1272 MemoryRegion *mr = opaque;
1273 uint64_t data = (uint64_t)~0;
1274
1275 switch (size) {
1276 case 1:
1277 data = *(uint8_t *)(mr->ram_block->host + addr);
1278 break;
1279 case 2:
1280 data = *(uint16_t *)(mr->ram_block->host + addr);
1281 break;
1282 case 4:
1283 data = *(uint32_t *)(mr->ram_block->host + addr);
1284 break;
1285 case 8:
1286 data = *(uint64_t *)(mr->ram_block->host + addr);
1287 break;
1288 }
1289
1290 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1291
1292 return data;
1293}
1294
1295static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1296 uint64_t data, unsigned size)
1297{
1298 MemoryRegion *mr = opaque;
1299
1300 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1301
1302 switch (size) {
1303 case 1:
1304 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1305 break;
1306 case 2:
1307 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1308 break;
1309 case 4:
1310 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1311 break;
1312 case 8:
1313 *(uint64_t *)(mr->ram_block->host + addr) = data;
1314 break;
1315 }
1316}
1317
1318static const MemoryRegionOps ram_device_mem_ops = {
1319 .read = memory_region_ram_device_read,
1320 .write = memory_region_ram_device_write,
Yongji Xiec99a29e2017-02-27 12:52:44 +08001321 .endianness = DEVICE_HOST_ENDIAN,
Alex Williamson4a2e2422016-10-31 09:53:03 -06001322 .valid = {
1323 .min_access_size = 1,
1324 .max_access_size = 8,
1325 .unaligned = true,
1326 },
1327 .impl = {
1328 .min_access_size = 1,
1329 .max_access_size = 8,
1330 .unaligned = true,
1331 },
1332};
1333
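/*
 * Check an access against the region's ops->valid constraints: alignment,
 * minimum/maximum access size (defaulting to 1 and 4 bytes when the ops do
 * not specify them), and the optional accepts() callback.
 */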
Paolo Bonzinid2702032013-05-24 11:55:06 +02001334bool memory_region_access_valid(MemoryRegion *mr,
1335 hwaddr addr,
1336 unsigned size,
1337 bool is_write)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001338{
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001339 int access_size_min, access_size_max;
1340 int access_size, i;
Avi Kivity897fa7c2011-11-13 13:05:27 +02001341
Avi Kivity093bc2c2011-07-26 14:26:01 +03001342 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1343 return false;
1344 }
1345
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001346 if (!mr->ops->valid.accepts) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001347 return true;
1348 }
1349
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001350 access_size_min = mr->ops->valid.min_access_size;
1351 if (!mr->ops->valid.min_access_size) {
1352 access_size_min = 1;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001353 }
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001354
1355 access_size_max = mr->ops->valid.max_access_size;
1356 if (!mr->ops->valid.max_access_size) {
1357 access_size_max = 4;
1358 }
1359
1360 access_size = MAX(MIN(size, access_size_max), access_size_min);
1361 for (i = 0; i < size; i += access_size) {
1362 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
1363 is_write)) {
1364 return false;
1365 }
1366 }
1367
Avi Kivity093bc2c2011-07-26 14:26:01 +03001368 return true;
1369}
1370
Peter Maydellcc05c432015-04-26 16:49:23 +01001371static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1372 hwaddr addr,
1373 uint64_t *pval,
1374 unsigned size,
1375 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001376{
Peter Maydellcc05c432015-04-26 16:49:23 +01001377 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001378
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001379 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001380 return access_with_adjusted_size(addr, pval, size,
1381 mr->ops->impl.min_access_size,
1382 mr->ops->impl.max_access_size,
1383 memory_region_read_accessor,
1384 mr, attrs);
1385 } else if (mr->ops->read_with_attrs) {
1386 return access_with_adjusted_size(addr, pval, size,
1387 mr->ops->impl.min_access_size,
1388 mr->ops->impl.max_access_size,
1389 memory_region_read_with_attrs_accessor,
1390 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001391 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001392 return access_with_adjusted_size(addr, pval, size, 1, 4,
1393 memory_region_oldmmio_read_accessor,
1394 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001395 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001396}
1397
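/*
 * Entry point used by the memory core to dispatch a load to an MMIO region:
 * validate the access, run it through accessors sized according to
 * ops->impl, then fix up endianness for the caller.
 */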
Peter Maydell3b643492015-04-26 16:49:23 +01001398MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1399 hwaddr addr,
1400 uint64_t *pval,
1401 unsigned size,
1402 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001403{
Peter Maydellcc05c432015-04-26 16:49:23 +01001404 MemTxResult r;
1405
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001406 if (!memory_region_access_valid(mr, addr, size, false)) {
1407 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001408 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001409 }
Avi Kivitya621f382012-01-02 13:12:08 +02001410
Peter Maydellcc05c432015-04-26 16:49:23 +01001411 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001412 adjust_endianness(mr, pval, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001413 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001414}
1415
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001416/* Return true if an eventfd was signalled */
1417static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1418 hwaddr addr,
1419 uint64_t data,
1420 unsigned size,
1421 MemTxAttrs attrs)
1422{
1423 MemoryRegionIoeventfd ioeventfd = {
1424 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1425 .data = data,
1426 };
1427 unsigned i;
1428
1429 for (i = 0; i < mr->ioeventfd_nb; i++) {
1430 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1431 ioeventfd.e = mr->ioeventfds[i].e;
1432
1433 if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
1434 event_notifier_set(ioeventfd.e);
1435 return true;
1436 }
1437 }
1438
1439 return false;
1440}
1441
Peter Maydell3b643492015-04-26 16:49:23 +01001442MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1443 hwaddr addr,
1444 uint64_t data,
1445 unsigned size,
1446 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001447{
Avi Kivity897fa7c2011-11-13 13:05:27 +02001448 if (!memory_region_access_valid(mr, addr, size, true)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001449 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001450 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001451 }
1452
Avi Kivitya621f382012-01-02 13:12:08 +02001453 adjust_endianness(mr, &data, size);
1454
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001455 if ((!kvm_eventfds_enabled()) &&
1456 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1457 return MEMTX_OK;
1458 }
1459
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001460 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001461 return access_with_adjusted_size(addr, &data, size,
1462 mr->ops->impl.min_access_size,
1463 mr->ops->impl.max_access_size,
1464 memory_region_write_accessor, mr,
1465 attrs);
1466 } else if (mr->ops->write_with_attrs) {
1467 return
1468 access_with_adjusted_size(addr, &data, size,
1469 mr->ops->impl.min_access_size,
1470 mr->ops->impl.max_access_size,
1471 memory_region_write_with_attrs_accessor,
1472 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001473 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001474 return access_with_adjusted_size(addr, &data, size, 1, 4,
1475 memory_region_oldmmio_write_accessor,
1476 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001477 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001478}
1479
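/*
 * Illustrative use from a device realize function (the state type, ops and
 * size below are hypothetical, not part of this file):
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                           "my-dev-mmio", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
 */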
Avi Kivity093bc2c2011-07-26 14:26:01 +03001480void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001481 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001482 const MemoryRegionOps *ops,
1483 void *opaque,
1484 const char *name,
1485 uint64_t size)
1486{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001487 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001488 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001489 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001490 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001491}
1492
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001493void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1494 Object *owner,
1495 const char *name,
1496 uint64_t size,
1497 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001498{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001499 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001500 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001501 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001502 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001503 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001504 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001505}
1506
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001507void memory_region_init_resizeable_ram(MemoryRegion *mr,
1508 Object *owner,
1509 const char *name,
1510 uint64_t size,
1511 uint64_t max_size,
1512 void (*resized)(const char*,
1513 uint64_t length,
1514 void *host),
1515 Error **errp)
1516{
1517 memory_region_init(mr, owner, name, size);
1518 mr->ram = true;
1519 mr->terminates = true;
1520 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001521 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1522 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001523 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001524}
1525
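/*
 * File-backed RAM (Linux only): the region's RAM block is mapped from a
 * file or file descriptor, e.g. hugetlbfs or a shared memory file used to
 * share guest RAM with another process such as a vhost-user backend.
 */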
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001526#ifdef __linux__
1527void memory_region_init_ram_from_file(MemoryRegion *mr,
1528 struct Object *owner,
1529 const char *name,
1530 uint64_t size,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001531 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001532 const char *path,
1533 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001534{
1535 memory_region_init(mr, owner, name, size);
1536 mr->ram = true;
1537 mr->terminates = true;
1538 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001539 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001540 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001541}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001542
1543void memory_region_init_ram_from_fd(MemoryRegion *mr,
1544 struct Object *owner,
1545 const char *name,
1546 uint64_t size,
1547 bool share,
1548 int fd,
1549 Error **errp)
1550{
1551 memory_region_init(mr, owner, name, size);
1552 mr->ram = true;
1553 mr->terminates = true;
1554 mr->destructor = memory_region_destructor_ram;
1555 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1556 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1557}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001558#endif
1559
Avi Kivity093bc2c2011-07-26 14:26:01 +03001560void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001561 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001562 const char *name,
1563 uint64_t size,
1564 void *ptr)
1565{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001566 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001567 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001568 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001569 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001570 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001571
1572 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1573 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001574 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001575}
1576
Alex Williamson21e00fa2016-10-31 09:53:03 -06001577void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1578 Object *owner,
1579 const char *name,
1580 uint64_t size,
1581 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301582{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001583 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1584 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001585 mr->ops = &ram_device_mem_ops;
1586 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301587}
1588
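/*
 * An alias owns no storage of its own; it makes a window of @orig, starting
 * at @offset and @size bytes long, visible wherever the alias is mapped.
 */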
Avi Kivity093bc2c2011-07-26 14:26:01 +03001589void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001590 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001591 const char *name,
1592 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001593 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001594 uint64_t size)
1595{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001596 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001597 mr->alias = orig;
1598 mr->alias_offset = offset;
1599}
1600
Peter Maydellb59821a2017-07-07 15:42:50 +01001601void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1602 struct Object *owner,
1603 const char *name,
1604 uint64_t size,
1605 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001606{
1607 memory_region_init(mr, owner, name, size);
1608 mr->ram = true;
1609 mr->readonly = true;
1610 mr->terminates = true;
1611 mr->destructor = memory_region_destructor_ram;
1612 mr->ram_block = qemu_ram_alloc(size, mr, errp);
1613 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1614}
1615
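/*
 * ROM device: reads are satisfied directly from RAM while the region is in
 * "romd" mode, writes are always dispatched to the callbacks in @ops
 * (typically flash programming commands).
 */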
Peter Maydellb59821a2017-07-07 15:42:50 +01001616void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1617 Object *owner,
1618 const MemoryRegionOps *ops,
1619 void *opaque,
1620 const char *name,
1621 uint64_t size,
1622 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001623{
Peter Maydell39e0b032016-07-04 13:06:35 +01001624 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001625 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001626 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001627 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001628 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001629 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001630 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001631 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001632}
1633
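/*
 * Initialize an IOMMU memory region of the concrete type @mrtypename.
 * Unlike the other initializers this also object_initialize()s the
 * instance, since IOMMU regions are full QOM objects whose class
 * (IOMMUMemoryRegionClass) provides translate() and friends.
 */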
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001634void memory_region_init_iommu(void *_iommu_mr,
1635 size_t instance_size,
1636 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001637 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001638 const char *name,
1639 uint64_t size)
1640{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001641 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001642 struct MemoryRegion *mr;
1643
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001644 object_initialize(_iommu_mr, instance_size, mrtypename);
1645 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001646 memory_region_do_init(mr, owner, name, size);
1647 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001648    mr->terminates = true;  /* lookup terminates here; the IOMMU then re-forwards the access */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001649 QLIST_INIT(&iommu_mr->iommu_notify);
1650 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001651}
1652
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001653static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001654{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001655 MemoryRegion *mr = MEMORY_REGION(obj);
1656
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001657 assert(!mr->container);
1658
1659 /* We know the region is not visible in any address space (it
1660 * does not have a container and cannot be a root either because
                              1661     * it has no references), so we can blindly clear mr->enabled.
1662 * memory_region_set_enabled instead could trigger a transaction
1663 * and cause an infinite loop.
1664 */
1665 mr->enabled = false;
1666 memory_region_transaction_begin();
1667 while (!QTAILQ_EMPTY(&mr->subregions)) {
1668 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1669 memory_region_del_subregion(mr, subregion);
1670 }
1671 memory_region_transaction_commit();
1672
Avi Kivity545e92e2011-08-08 19:58:48 +03001673 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001674 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001675 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001676 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001677}
1678
Paolo Bonzini803c0812013-05-07 06:59:09 +02001679Object *memory_region_owner(MemoryRegion *mr)
1680{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001681 Object *obj = OBJECT(mr);
1682 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001683}
1684
Paolo Bonzini46637be2013-05-07 09:06:00 +02001685void memory_region_ref(MemoryRegion *mr)
1686{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001687 /* MMIO callbacks most likely will access data that belongs
1688 * to the owner, hence the need to ref/unref the owner whenever
1689 * the memory region is in use.
1690 *
1691 * The memory region is a child of its owner. As long as the
                              1692     * owner doesn't call unparent on the memory region itself,
1693 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001694 * Memory regions without an owner are supposed to never go away;
                              1695     * we do not ref/unref them because doing so would noticeably slow down DMA.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001696 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001697 if (mr && mr->owner) {
1698 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001699 }
1700}
1701
1702void memory_region_unref(MemoryRegion *mr)
1703{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001704 if (mr && mr->owner) {
1705 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001706 }
1707}
1708
Avi Kivity093bc2c2011-07-26 14:26:01 +03001709uint64_t memory_region_size(MemoryRegion *mr)
1710{
Avi Kivity08dafab2011-10-16 13:19:17 +02001711 if (int128_eq(mr->size, int128_2_64())) {
1712 return UINT64_MAX;
1713 }
1714 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001715}
1716
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001717const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001718{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001719 if (!mr->name) {
1720 ((MemoryRegion *)mr)->name =
1721 object_get_canonical_path_component(OBJECT(mr));
1722 }
Peter Maydell302fa282014-08-19 20:05:46 +01001723 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001724}
1725
Alex Williamson21e00fa2016-10-31 09:53:03 -06001726bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301727{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001728 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301729}
1730
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001731uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001732{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001733 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001734 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001735 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1736 }
1737 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001738}
1739
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001740bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1741{
1742 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1743}
1744
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001745static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001746{
1747 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1748 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001749 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001750
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001751 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001752 flags |= iommu_notifier->notifier_flags;
1753 }
1754
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001755 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1756 imrc->notify_flag_changed(iommu_mr,
1757 iommu_mr->iommu_notify_flags,
1758 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001759 }
1760
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001761 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001762}
1763
Peter Xucdb30812016-09-23 13:02:26 +08001764void memory_region_register_iommu_notifier(MemoryRegion *mr,
1765 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001766{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001767 IOMMUMemoryRegion *iommu_mr;
1768
Jason Wangefcd38c2016-12-30 18:09:17 +08001769 if (mr->alias) {
1770 memory_region_register_iommu_notifier(mr->alias, n);
1771 return;
1772 }
1773
Peter Xucdb30812016-09-23 13:02:26 +08001774 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001775 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001776 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001777 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001778 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1779 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001780}
1781
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001782uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001783{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001784 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1785
1786 if (imrc->get_min_page_size) {
1787 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001788 }
1789 return TARGET_PAGE_SIZE;
1790}
1791
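/*
 * Replay the IOMMU's current mappings to a single notifier, either via the
 * implementation's replay() hook or by walking the whole region at the
 * minimum page granularity and translating each page.
 */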
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001792void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001793{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001794 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001795 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001796 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001797 IOMMUTLBEntry iotlb;
1798
Peter Xufaa362e2017-04-07 18:59:11 +08001799 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001800 if (imrc->replay) {
1801 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001802 return;
1803 }
1804
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001805 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001806
David Gibsona788f222015-09-30 12:13:55 +10001807 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001808 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001809 if (iotlb.perm != IOMMU_NONE) {
1810 n->notify(n, &iotlb);
1811 }
1812
                              1813        /* If (2^64 - MR size) < granularity, addr += granularity can wrap
                              1814         * around to zero and loop forever; this check catches that wraparound. */
1815 if ((addr + granularity) < addr) {
1816 break;
1817 }
1818 }
1819}
1820
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001821void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001822{
1823 IOMMUNotifier *notifier;
1824
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001825 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1826 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001827 }
1828}
1829
Peter Xucdb30812016-09-23 13:02:26 +08001830void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1831 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001832{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001833 IOMMUMemoryRegion *iommu_mr;
1834
Jason Wangefcd38c2016-12-30 18:09:17 +08001835 if (mr->alias) {
1836 memory_region_unregister_iommu_notifier(mr->alias, n);
1837 return;
1838 }
Peter Xucdb30812016-09-23 13:02:26 +08001839 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001840 iommu_mr = IOMMU_MEMORY_REGION(mr);
1841 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001842}
1843
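/*
 * Deliver one translation change to one notifier, filtering on both the
 * notifier's registered address range and the event types it asked for
 * (MAP vs UNMAP).
 */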
Peter Xubd2bfa42017-04-07 18:59:10 +08001844void memory_region_notify_one(IOMMUNotifier *notifier,
1845 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001846{
Peter Xucdb30812016-09-23 13:02:26 +08001847 IOMMUNotifierFlag request_flags;
1848
Peter Xubd2bfa42017-04-07 18:59:10 +08001849 /*
                              1850     * Skip the notification if it does not overlap with the
                              1851     * notifier's registered range.
1852 */
1853 if (notifier->start > entry->iova + entry->addr_mask + 1 ||
1854 notifier->end < entry->iova) {
1855 return;
1856 }
Peter Xucdb30812016-09-23 13:02:26 +08001857
Peter Xubd2bfa42017-04-07 18:59:10 +08001858 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001859 request_flags = IOMMU_NOTIFIER_MAP;
1860 } else {
1861 request_flags = IOMMU_NOTIFIER_UNMAP;
1862 }
1863
Peter Xubd2bfa42017-04-07 18:59:10 +08001864 if (notifier->notifier_flags & request_flags) {
1865 notifier->notify(notifier, entry);
1866 }
1867}
1868
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001869void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001870 IOMMUTLBEntry entry)
1871{
1872 IOMMUNotifier *iommu_notifier;
1873
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001874 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001875
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001876 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001877 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001878 }
David Gibson06866572013-05-14 19:13:56 +10001879}
1880
Avi Kivity093bc2c2011-07-26 14:26:01 +03001881void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1882{
Avi Kivity5a583342011-07-26 14:26:02 +03001883 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001884 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001885
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001886 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001887 old_logging = mr->vga_logging_count;
1888 mr->vga_logging_count += log ? 1 : -1;
1889 if (!!old_logging == !!mr->vga_logging_count) {
1890 return;
1891 }
1892
Jan Kiszka59023ef2012-08-23 13:02:30 +02001893 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001894 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001895 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001896 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001897}
1898
Avi Kivitya8170e52012-10-23 12:30:10 +02001899bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1900 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001901{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001902 assert(mr->ram_block);
1903 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1904 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001905}
1906
Avi Kivitya8170e52012-10-23 12:30:10 +02001907void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1908 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001909{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001910 assert(mr->ram_block);
1911 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1912 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001913 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001914}
1915
Juan Quintela6c279db2012-10-17 20:24:28 +02001916bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1917 hwaddr size, unsigned client)
1918{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001919 assert(mr->ram_block);
1920 return cpu_physical_memory_test_and_clear_dirty(
1921 memory_region_get_ram_addr(mr) + addr, size, client);
Juan Quintela6c279db2012-10-17 20:24:28 +02001922}
1923
Gerd Hoffmann8deaf122017-04-21 11:16:25 +02001924DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1925 hwaddr addr,
1926 hwaddr size,
1927 unsigned client)
1928{
1929 assert(mr->ram_block);
1930 return cpu_physical_memory_snapshot_and_clear_dirty(
1931 memory_region_get_ram_addr(mr) + addr, size, client);
1932}
1933
1934bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1935 hwaddr addr, hwaddr size)
1936{
1937 assert(mr->ram_block);
1938 return cpu_physical_memory_snapshot_get_dirty(snap,
1939 memory_region_get_ram_addr(mr) + addr, size);
1940}
Juan Quintela6c279db2012-10-17 20:24:28 +02001941
Avi Kivity093bc2c2011-07-26 14:26:01 +03001942void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1943{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001944 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001945 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001946 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001947 FlatRange *fr;
1948
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001949 /* If the same address space has multiple log_sync listeners, we
1950 * visit that address space's FlatView multiple times. But because
                              1951     * log_sync listeners are rare, this is still cheaper than walking
                              1952     * every address space once, whether or not it has such a listener.
1953 */
1954 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1955 if (!listener->log_sync) {
1956 continue;
1957 }
1958 as = listener->address_space;
1959 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001960 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity0d673e32012-10-02 15:28:50 +02001961 if (fr->mr == mr) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10001962 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001963 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02001964 }
Avi Kivity5a583342011-07-26 14:26:02 +03001965 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001966 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03001967 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001968}
1969
1970void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1971{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001972 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001973 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001974 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01001975 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001976 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001977 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001978}
1979
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001980void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001981{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001982 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001983 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001984 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01001985 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001986 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001987 }
1988}
1989
Avi Kivitya8170e52012-10-23 12:30:10 +02001990void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1991 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001992{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001993 assert(mr->ram_block);
1994 cpu_physical_memory_test_and_clear_dirty(
1995 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001996}
1997
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001998int memory_region_get_fd(MemoryRegion *mr)
1999{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002000 int fd;
2001
2002 rcu_read_lock();
2003 while (mr->alias) {
2004 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002005 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002006 fd = mr->ram_block->fd;
2007 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002008
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002009 return fd;
2010}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002011
Avi Kivity093bc2c2011-07-26 14:26:01 +03002012void *memory_region_get_ram_ptr(MemoryRegion *mr)
2013{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002014 void *ptr;
2015 uint64_t offset = 0;
2016
2017 rcu_read_lock();
2018 while (mr->alias) {
2019 offset += mr->alias_offset;
2020 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002021 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002022 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002023 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002024 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002025
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002026 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002027}
2028
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002029MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2030{
2031 RAMBlock *block;
2032
2033 block = qemu_ram_block_from_host(ptr, false, offset);
2034 if (!block) {
2035 return NULL;
2036 }
2037
2038 return block->mr;
2039}
2040
Fam Zheng7ebb2742016-03-01 14:18:20 +08002041ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2042{
2043 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2044}
2045
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002046void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2047{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002048 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002049
Gongleifa53a0e2016-05-10 10:04:59 +08002050 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002051}
2052
Avi Kivity0d673e32012-10-02 15:28:50 +02002053static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002054{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002055 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002056 FlatRange *fr;
2057 CoalescedMemoryRange *cmr;
2058 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02002059 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002060
Paolo Bonzini856d7242013-05-06 11:57:21 +02002061 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002062 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002063 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02002064 section = (MemoryRegionSection) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002065 .fv = view,
Avi Kivity95d29942012-10-02 18:21:54 +02002066 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002067 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02002068 };
2069
Paolo Bonzini9a546352016-09-22 16:23:06 +02002070 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002071 int128_get64(fr->addr.start),
2072 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002073 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2074 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02002075 int128_sub(fr->addr.start,
2076 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002077 if (!addrrange_intersects(tmp, fr->addr)) {
2078 continue;
2079 }
2080 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002081 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002082 int128_get64(tmp.start),
2083 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002084 }
2085 }
2086 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002087 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002088}
2089
Avi Kivity0d673e32012-10-02 15:28:50 +02002090static void memory_region_update_coalesced_range(MemoryRegion *mr)
2091{
2092 AddressSpace *as;
2093
2094 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2095 memory_region_update_coalesced_range_as(mr, as);
2096 }
2097}
2098
Avi Kivity093bc2c2011-07-26 14:26:01 +03002099void memory_region_set_coalescing(MemoryRegion *mr)
2100{
2101 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002102 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002103}
2104
2105void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002106 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002107 uint64_t size)
2108{
Anthony Liguori7267c092011-08-20 22:09:37 -05002109 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002110
Avi Kivity08dafab2011-10-16 13:19:17 +02002111 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002112 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2113 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02002114 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002115}
2116
2117void memory_region_clear_coalescing(MemoryRegion *mr)
2118{
2119 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002120 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002121
Jan Kiszkad4105152012-08-23 13:02:29 +02002122 qemu_flush_coalesced_mmio_buffer();
2123 mr->flush_coalesced_mmio = false;
2124
Avi Kivity093bc2c2011-07-26 14:26:01 +03002125 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2126 cmr = QTAILQ_FIRST(&mr->coalesced);
2127 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002128 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002129 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002130 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002131
2132 if (updated) {
2133 memory_region_update_coalesced_range(mr);
2134 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002135}
2136
Jan Kiszkad4105152012-08-23 13:02:29 +02002137void memory_region_set_flush_coalesced(MemoryRegion *mr)
2138{
2139 mr->flush_coalesced_mmio = true;
2140}
2141
2142void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2143{
2144 qemu_flush_coalesced_mmio_buffer();
2145 if (QTAILQ_EMPTY(&mr->coalesced)) {
2146 mr->flush_coalesced_mmio = false;
2147 }
2148}
2149
Jan Kiszka196ea132015-06-18 18:47:20 +02002150void memory_region_set_global_locking(MemoryRegion *mr)
2151{
2152 mr->global_locking = true;
2153}
2154
2155void memory_region_clear_global_locking(MemoryRegion *mr)
2156{
2157 mr->global_locking = false;
2158}
2159
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002160static bool userspace_eventfd_warning;
2161
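/*
 * Illustrative use of memory_region_add_eventfd (offset, size and names are
 * hypothetical, not from this file):
 *
 *     memory_region_add_eventfd(&s->mmio, 0x40, 4, true, vq_idx, &s->notifier);
 *
 * A 4-byte guest write of vq_idx to offset 0x40 then completes by signalling
 * the notifier (through a KVM ioeventfd when available) instead of calling
 * the region's write callback.
 */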
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002162void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002163 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002164 unsigned size,
2165 bool match_data,
2166 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002167 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002168{
2169 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002170 .addr.start = int128_make64(addr),
2171 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002172 .match_data = match_data,
2173 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002174 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002175 };
2176 unsigned i;
2177
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002178 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2179 userspace_eventfd_warning))) {
2180 userspace_eventfd_warning = true;
2181 error_report("Using eventfd without MMIO binding in KVM. "
2182 "Suboptimal performance expected");
2183 }
2184
Jason Wangb8aecea2015-11-06 16:02:45 +08002185 if (size) {
2186 adjust_endianness(mr, &mrfd.data, size);
2187 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002188 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002189 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2190 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2191 break;
2192 }
2193 }
2194 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002195 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002196 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2197 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2198 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2199 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002200 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002201 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002202}
2203
2204void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002205 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002206 unsigned size,
2207 bool match_data,
2208 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002209 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002210{
2211 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002212 .addr.start = int128_make64(addr),
2213 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002214 .match_data = match_data,
2215 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002216 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002217 };
2218 unsigned i;
2219
Jason Wangb8aecea2015-11-06 16:02:45 +08002220 if (size) {
2221 adjust_endianness(mr, &mrfd.data, size);
2222 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002223 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002224 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2225 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2226 break;
2227 }
2228 }
2229 assert(i != mr->ioeventfd_nb);
2230 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2231 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2232 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002233 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002234 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002235 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002236 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002237}
2238
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002239static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002240{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002241 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002242 MemoryRegion *other;
2243
Jan Kiszka59023ef2012-08-23 13:02:30 +02002244 memory_region_transaction_begin();
2245
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002246 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002247 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002248 if (subregion->priority >= other->priority) {
2249 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2250 goto done;
2251 }
2252 }
2253 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2254done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002255 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002256 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002257}
2258
Peter Crosthwaite05987012014-06-05 23:14:44 -07002259static void memory_region_add_subregion_common(MemoryRegion *mr,
2260 hwaddr offset,
2261 MemoryRegion *subregion)
2262{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002263 assert(!subregion->container);
2264 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002265 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002266 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002267}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002268
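/*
 * Subregions are kept sorted by descending priority; where two enabled
 * subregions overlap, the higher-priority one is visible.  Illustrative
 * layering (region names are hypothetical):
 *
 *     memory_region_add_subregion(sysmem, 0, machine_ram);
 *     memory_region_add_subregion_overlap(sysmem, 0xfee00000, apic_mmio, 1);
 */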
2269void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002270 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002271 MemoryRegion *subregion)
2272{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002273 subregion->priority = 0;
2274 memory_region_add_subregion_common(mr, offset, subregion);
2275}
2276
2277void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002278 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002279 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002280 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002281{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002282 subregion->priority = priority;
2283 memory_region_add_subregion_common(mr, offset, subregion);
2284}
2285
2286void memory_region_del_subregion(MemoryRegion *mr,
2287 MemoryRegion *subregion)
2288{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002289 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002290 assert(subregion->container == mr);
2291 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002292 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002293 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002294 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002295 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002296}
2297
2298void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2299{
2300 if (enabled == mr->enabled) {
2301 return;
2302 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002303 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002304 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002305 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002306 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002307}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002308
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002309void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2310{
2311 Int128 s = int128_make64(size);
2312
2313 if (size == UINT64_MAX) {
2314 s = int128_2_64();
2315 }
2316 if (int128_eq(s, mr->size)) {
2317 return;
2318 }
2319 memory_region_transaction_begin();
2320 mr->size = s;
2321 memory_region_update_pending = true;
2322 memory_region_transaction_commit();
2323}
2324
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002325static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002326{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002327 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002328
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002329 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002330 memory_region_transaction_begin();
2331 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002332 memory_region_del_subregion(container, mr);
2333 mr->container = container;
2334 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002335 memory_region_unref(mr);
2336 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002337 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002338}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002339
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002340void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2341{
2342 if (addr != mr->addr) {
2343 mr->addr = addr;
2344 memory_region_readd_subregion(mr);
2345 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002346}
2347
Avi Kivitya8170e52012-10-23 12:30:10 +02002348void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002349{
Avi Kivity47033592011-12-04 19:16:50 +02002350 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002351
Jan Kiszka59023ef2012-08-23 13:02:30 +02002352 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002353 return;
2354 }
2355
Jan Kiszka59023ef2012-08-23 13:02:30 +02002356 memory_region_transaction_begin();
2357 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002358 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002359 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002360}
2361
Igor Mammedova2b257d2014-10-31 16:38:37 +00002362uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2363{
2364 return mr->align;
2365}
2366
Avi Kivitye2177952011-12-08 15:00:18 +02002367static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2368{
2369 const AddrRange *addr = addr_;
2370 const FlatRange *fr = fr_;
2371
2372 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2373 return -1;
2374 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2375 return 1;
2376 }
2377 return 0;
2378}
2379
Paolo Bonzini99e86342013-05-06 10:26:13 +02002380static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002381{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002382 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002383 sizeof(FlatRange), cmp_flatrange_addr);
2384}
2385
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002386bool memory_region_is_mapped(MemoryRegion *mr)
2387{
2388 return mr->container ? true : false;
2389}
2390
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002391/* Same as memory_region_find, but it does not add a reference to the
2392 * returned region. It must be called from an RCU critical section.
2393 */
2394static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2395 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002396{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002397 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002398 MemoryRegion *root;
2399 AddressSpace *as;
2400 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002401 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002402 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002403
Paolo Bonzini73034e92013-05-07 15:48:28 +02002404 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002405 for (root = mr; root->container; ) {
2406 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002407 addr += root->addr;
2408 }
2409
2410 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002411 if (!as) {
2412 return ret;
2413 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002414 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002415
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002416 view = address_space_to_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002417 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002418 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002419 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002420 }
2421
Paolo Bonzini99e86342013-05-06 10:26:13 +02002422 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002423 --fr;
2424 }
2425
2426 ret.mr = fr->mr;
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002427 ret.fv = view;
Avi Kivitye2177952011-12-08 15:00:18 +02002428 range = addrrange_intersection(range, fr->addr);
2429 ret.offset_within_region = fr->offset_in_region;
2430 ret.offset_within_region += int128_get64(int128_sub(range.start,
2431 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002432 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002433 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002434 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002435 return ret;
2436}
2437
2438MemoryRegionSection memory_region_find(MemoryRegion *mr,
2439 hwaddr addr, uint64_t size)
2440{
2441 MemoryRegionSection ret;
2442 rcu_read_lock();
2443 ret = memory_region_find_rcu(mr, addr, size);
2444 if (ret.mr) {
2445 memory_region_ref(ret.mr);
2446 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002447 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002448 return ret;
2449}
2450
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002451bool memory_region_present(MemoryRegion *container, hwaddr addr)
2452{
2453 MemoryRegion *mr;
2454
2455 rcu_read_lock();
2456 mr = memory_region_find_rcu(container, addr, 1).mr;
2457 rcu_read_unlock();
2458 return mr && mr != container;
2459}
2460
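/*
 * Push dirty bitmap updates to every listener that implements log_sync,
 * walking that listener's address space and syncing only the ranges whose
 * dirty mask is non-zero (this is how migration picks up pages dirtied by
 * the guest).
 */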
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002461void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002462{
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002463 MemoryListener *listener;
2464 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002465 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002466 FlatRange *fr;
2467
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002468 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2469 if (!listener->log_sync) {
2470 continue;
2471 }
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002472 as = listener->address_space;
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002473 view = address_space_get_flatview(as);
2474 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonziniadaad612016-09-22 16:09:08 +02002475 if (fr->dirty_log_mask) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002476 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2477
Paolo Bonziniadaad612016-09-22 16:09:08 +02002478 listener->log_sync(listener, &mrs);
2479 }
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002480 }
2481 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002482 }
2483}
2484
Jay Zhou19310762017-07-28 18:28:53 +08002485static VMChangeStateEntry *vmstate_change;
2486
Avi Kivity7664e802011-12-11 14:47:25 +02002487void memory_global_dirty_log_start(void)
2488{
Jay Zhou19310762017-07-28 18:28:53 +08002489 if (vmstate_change) {
2490 qemu_del_vm_change_state_handler(vmstate_change);
2491 vmstate_change = NULL;
2492 }
2493
Avi Kivity7664e802011-12-11 14:47:25 +02002494 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002495
Avi Kivity7376e582012-02-08 21:05:17 +02002496 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002497
2498 /* Refresh DIRTY_LOG_MIGRATION bit. */
2499 memory_region_transaction_begin();
2500 memory_region_update_pending = true;
2501 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002502}
2503
Jay Zhou19310762017-07-28 18:28:53 +08002504static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002505{
Avi Kivity7664e802011-12-11 14:47:25 +02002506 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002507
2508 /* Refresh DIRTY_LOG_MIGRATION bit. */
2509 memory_region_transaction_begin();
2510 memory_region_update_pending = true;
2511 memory_region_transaction_commit();
2512
Avi Kivity7376e582012-02-08 21:05:17 +02002513 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002514}
2515
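/*
 * Stopping the global dirty log while the VM is paused is deferred: a VM
 * change state handler is installed instead, and the actual stop happens
 * the next time the VM transitions to running (see
 * memory_global_dirty_log_stop() below).
 */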
Jay Zhou19310762017-07-28 18:28:53 +08002516static void memory_vm_change_state_handler(void *opaque, int running,
2517 RunState state)
2518{
2519 if (running) {
2520 memory_global_dirty_log_do_stop();
2521
2522 if (vmstate_change) {
2523 qemu_del_vm_change_state_handler(vmstate_change);
2524 vmstate_change = NULL;
2525 }
2526 }
2527}
2528
2529void memory_global_dirty_log_stop(void)
2530{
2531 if (!runstate_is_running()) {
2532 if (vmstate_change) {
2533 return;
2534 }
2535 vmstate_change = qemu_add_vm_change_state_handler(
2536 memory_vm_change_state_handler, NULL);
2537 return;
2538 }
2539
2540 memory_global_dirty_log_do_stop();
2541}
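/*
 * Descriptive note, no behaviour change implied: if the VM is not running
 * when memory_global_dirty_log_stop() is called, the actual stop is deferred.
 * A VM change state handler is registered instead, and
 * memory_global_dirty_log_do_stop() runs the next time the VM transitions to
 * the running state.  For example, stopping dirty logging after a migration
 * finishes while the guest is paused only takes effect once the guest
 * resumes.
 */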

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .fv = view,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
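/*
 * Illustrative sketch only (callback names and the priority value are made
 * up): a component that wants to track the memory map of an address space
 * fills in a MemoryListener and registers it, e.g.
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .log_sync   = my_log_sync,
 *         .priority   = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * Registration replays the current FlatView through region_add (see
 * listener_add_address_space() above); pair it with
 * memory_listener_unregister() on teardown.
 */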

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr callback calls
     * memory_region_invalidate_mmio_ptr(), which seems likely when we
     * use a cache.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}
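/*
 * Illustrative sketch only, derived from the call site above: a device that
 * wants to expose a direct host pointer for (part of) an MMIO region
 * implements the request_ptr hook of its MemoryRegionOps, for example:
 *
 *     static void *my_request_ptr(void *opaque, hwaddr addr,
 *                                 unsigned *size, unsigned *offset)
 *     {
 *         MyState *s = opaque;
 *
 *         *size = MY_WINDOW_SIZE;
 *         *offset = 0;
 *         return s->window;
 *     }
 *
 * MyState, MY_WINDOW_SIZE and s->window are placeholders, not names used
 * elsewhere in QEMU.
 */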

typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find() took a ref on section.mr; drop it. */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the mmio_interface; just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}
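/*
 * Descriptive note: memory_region_invalidate_mmio_ptr() grabs a free slot
 * from the small static pool above (falling back to g_malloc0() when all
 * slots are busy) and defers the actual teardown of the mmio_interface to
 * memory_region_do_invalidate_mmio_ptr(), scheduled via
 * async_safe_run_on_cpu() so the work runs while vCPUs are outside their
 * execution loop.
 */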

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
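/*
 * Illustrative usage only: a device or machine typically initialises an
 * address space over a root memory region it owns, e.g.
 *
 *     memory_region_init(&s->root, OBJECT(s), "mydev-root", UINT64_MAX);
 *     address_space_init(&s->as, &s->root, "mydev-as");
 *
 * and tears it down again with address_space_destroy(&s->as).  The "s->"
 * fields and the names are placeholders, not existing QEMU identifiers.
 */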

static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of the memory region.  This should never
     * happen normally; if it does, print a marker so the user looking at
     * the dump is warned.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    fprintf_function mon_printf;
    void *f;
    int counter;
    bool dispatch_tree;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    fprintf_function p = fvi->mon_printf;
    void *f = fvi->f;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    p(f, "FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
        if (as->root->alias) {
            p(f, ", alias %s", memory_region_name(as->root->alias));
        }
        p(f, "\n");
    }

    p(f, " Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(p, f, view->dispatch, view->root);
    }
#endif

    p(f, "\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
                bool dispatch_tree)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .mon_printf = mon_printf,
            .f = f,
            .counter = 0,
            .dispatch_tree = dispatch_tree
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
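/*
 * Descriptive note: mtree_info() backs the HMP monitor command "info mtree".
 * The flatview and dispatch_tree arguments correspond to the command's
 * optional flags (at the time of writing, "-f" and "-d"), which select the
 * rendered FlatViews and their dispatch trees instead of the memory region
 * hierarchy.
 */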

void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
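/*
 * Illustrative usage only: a device realize function typically creates its
 * RAM-backed region with
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram", s->ram_size,
 *                            &error_fatal);
 *
 * where "s", "dev" and "mydev.ram" are placeholders.  The migratable
 * variants below, memory_region_init_rom() and
 * memory_region_init_rom_device(), follow the same pattern.
 */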

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)