/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

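/*
 * Worked example (illustrative only): intersecting [0x1000, +0x3000)
 * with [0x2000, +0x4000) gives start = max(0x1000, 0x2000) = 0x2000 and
 * end = min(0x4000, 0x6000) = 0x4000, i.e. the range [0x2000, +0x2000).
 */
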
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

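/*
 * Usage sketch (illustrative): memory_region_transaction_commit() below
 * fans the global "begin" hook out to every registered listener with
 *
 *     MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
 *
 * Forward traverses the listener list front to back and Reverse back to
 * front, so setup callbacks and their matching teardown callbacks can
 * run in opposite orders (compare region_add/Forward with
 * region_del/Reverse in address_space_update_topology_pass()).
 */
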
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

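/*
 * The comparison above is lexicographic on (start, size, match_data,
 * data, notifier), giving a strict order over ioeventfds; equality is
 * then simply "neither sorts before the other".
 */
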
typedef struct FlatRange FlatRange;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        call_rcu(view, flatview_destroy, rcu);
    }
}

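/*
 * Lifetime sketch (illustrative, mirroring address_space_get_flatview()
 * below): a reader that wants to keep a FlatView beyond an RCU read
 * section takes a reference under rcu_read_lock() and drops it later:
 *
 *     rcu_read_lock();
 *     view = address_space_to_flatview(as);
 *     if (flatview_ref(view)) {      /- false only if ref already hit 0 -/
 *         rcu_read_unlock();
 *         ... use view ...
 *         flatview_unref(view);      /- last unref frees via call_rcu() -/
 *     }
 */
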
FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

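/*
 * Worked example (illustrative): two adjacent FlatRanges [0x0, +0x1000)
 * and [0x1000, +0x1000) that point into the same MemoryRegion at
 * offsets 0x0 and 0x1000, with identical dirty_log_mask, romd_mode and
 * readonly flags, satisfy can_merge() and collapse into a single
 * [0x0, +0x2000) range.
 */
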
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

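/*
 * Example (illustrative): on a little-endian target, a 4-byte value
 * 0x11223344 moving through a DEVICE_BIG_ENDIAN region is byte-swapped
 * here, i.e. bswap32(0x11223344) == 0x44332211; single-byte accesses
 * are never swapped.
 */
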
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

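/*
 * Example (illustrative): an 8-byte access to a region whose
 * impl.max_access_size is 4 is split into two 4-byte accesses with
 * access_mask 0xffffffff.  For a big-endian device the access at @addr
 * is shifted into bits [63:32] and the one at @addr + 4 into bits
 * [31:0]; for little-endian the shifts are reversed.
 */
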
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

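/*
 * Worked example (illustrative): a 4 KiB terminating region placed at
 * guest address 0x2000 under an otherwise empty root renders as one
 * FlatRange covering [0x2000, +0x1000) with offset_in_region 0.  If the
 * view already contains a range occupying [0x2800, +0x800), only the
 * gap [0x2000, +0x800) is inserted, again with offset_in_region 0.
 */
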
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->alias && !mr->alias_offset &&
           int128_ge(mr->size, mr->alias->size)) {
        /* The alias is included in its entirety.  Use it as
         * the "real" root, so that we can share more FlatViews.
         */
        mr = mr->alias;
    }

    return mr;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

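/*
 * Sharing note (illustrative): generated views are keyed in the
 * flat_views hash table by the "real" root from
 * memory_region_get_flatview_root(), so two address spaces whose roots
 * are full-size, zero-offset aliases of the same region share a single
 * FlatView and its AddressSpaceDispatch.
 */
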
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

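/*
 * Usage sketch (illustrative): batching several layout changes so the
 * flat views, listeners and ioeventfds are rebuilt only once, at commit
 * time:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);
 *     memory_region_add_subregion(root, 0x1000, mr_b);
 *     memory_region_transaction_commit();
 *
 * Nested begin/commit pairs are allowed; only the outermost commit
 * triggers the update.
 */
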
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

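/*
 * Example (illustrative): a region named "pci/bar[0]" escapes to
 * "pci\x2fbar\x5b0\x5d", making it safe to use as a QOM child property
 * name below.
 */
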
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

Paolo Bonzinid2702032013-05-24 11:55:06 +02001331bool memory_region_access_valid(MemoryRegion *mr,
1332 hwaddr addr,
1333 unsigned size,
1334 bool is_write)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001335{
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001336 int access_size_min, access_size_max;
1337 int access_size, i;
Avi Kivity897fa7c2011-11-13 13:05:27 +02001338
Avi Kivity093bc2c2011-07-26 14:26:01 +03001339 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1340 return false;
1341 }
1342
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001343 if (!mr->ops->valid.accepts) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001344 return true;
1345 }
1346
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001347 access_size_min = mr->ops->valid.min_access_size;
1348 if (!mr->ops->valid.min_access_size) {
1349 access_size_min = 1;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001350 }
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001351
1352 access_size_max = mr->ops->valid.max_access_size;
1353 if (!mr->ops->valid.max_access_size) {
1354 access_size_max = 4;
1355 }
1356
1357 access_size = MAX(MIN(size, access_size_max), access_size_min);
1358 for (i = 0; i < size; i += access_size) {
1359 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
1360 is_write)) {
1361 return false;
1362 }
1363 }
1364
Avi Kivity093bc2c2011-07-26 14:26:01 +03001365 return true;
1366}
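
/*
 * Illustrative sketch: the .valid fields that memory_region_access_valid()
 * above consults.  The demo_* callbacks are hypothetical; the block is
 * guarded out because it is an example only.
 */
#if 0
static const MemoryRegionOps demo_strict_ops = {
    .read = demo_read,            /* hypothetical MMIO callbacks */
    .write = demo_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,     /* 1- and 2-byte accesses are rejected */
        .max_access_size = 4,     /* ... as are 8-byte ones */
        .unaligned = false,       /* addr & (size - 1) must be zero */
    },
};
#endif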
1367
Peter Maydellcc05c432015-04-26 16:49:23 +01001368static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1369 hwaddr addr,
1370 uint64_t *pval,
1371 unsigned size,
1372 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001373{
Peter Maydellcc05c432015-04-26 16:49:23 +01001374 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001375
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001376 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001377 return access_with_adjusted_size(addr, pval, size,
1378 mr->ops->impl.min_access_size,
1379 mr->ops->impl.max_access_size,
1380 memory_region_read_accessor,
1381 mr, attrs);
1382 } else if (mr->ops->read_with_attrs) {
1383 return access_with_adjusted_size(addr, pval, size,
1384 mr->ops->impl.min_access_size,
1385 mr->ops->impl.max_access_size,
1386 memory_region_read_with_attrs_accessor,
1387 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001388 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001389 return access_with_adjusted_size(addr, pval, size, 1, 4,
1390 memory_region_oldmmio_read_accessor,
1391 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001392 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001393}
1394
Peter Maydell3b643492015-04-26 16:49:23 +01001395MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1396 hwaddr addr,
1397 uint64_t *pval,
1398 unsigned size,
1399 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001400{
Peter Maydellcc05c432015-04-26 16:49:23 +01001401 MemTxResult r;
1402
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001403 if (!memory_region_access_valid(mr, addr, size, false)) {
1404 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001405 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001406 }
Avi Kivitya621f382012-01-02 13:12:08 +02001407
Peter Maydellcc05c432015-04-26 16:49:23 +01001408 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001409 adjust_endianness(mr, pval, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001410 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001411}
1412
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001413/* Return true if an eventfd was signalled */
1414static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1415 hwaddr addr,
1416 uint64_t data,
1417 unsigned size,
1418 MemTxAttrs attrs)
1419{
1420 MemoryRegionIoeventfd ioeventfd = {
1421 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1422 .data = data,
1423 };
1424 unsigned i;
1425
1426 for (i = 0; i < mr->ioeventfd_nb; i++) {
1427 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1428 ioeventfd.e = mr->ioeventfds[i].e;
1429
1430 if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
1431 event_notifier_set(ioeventfd.e);
1432 return true;
1433 }
1434 }
1435
1436 return false;
1437}
1438
Peter Maydell3b643492015-04-26 16:49:23 +01001439MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1440 hwaddr addr,
1441 uint64_t data,
1442 unsigned size,
1443 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001444{
Avi Kivity897fa7c2011-11-13 13:05:27 +02001445 if (!memory_region_access_valid(mr, addr, size, true)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001446 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001447 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001448 }
1449
Avi Kivitya621f382012-01-02 13:12:08 +02001450 adjust_endianness(mr, &data, size);
1451
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001452 if ((!kvm_eventfds_enabled()) &&
1453 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1454 return MEMTX_OK;
1455 }
1456
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001457 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001458 return access_with_adjusted_size(addr, &data, size,
1459 mr->ops->impl.min_access_size,
1460 mr->ops->impl.max_access_size,
1461 memory_region_write_accessor, mr,
1462 attrs);
1463 } else if (mr->ops->write_with_attrs) {
1464 return
1465 access_with_adjusted_size(addr, &data, size,
1466 mr->ops->impl.min_access_size,
1467 mr->ops->impl.max_access_size,
1468 memory_region_write_with_attrs_accessor,
1469 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001470 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001471 return access_with_adjusted_size(addr, &data, size, 1, 4,
1472 memory_region_oldmmio_write_accessor,
1473 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001474 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001475}
1476
Avi Kivity093bc2c2011-07-26 14:26:01 +03001477void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001478 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001479 const MemoryRegionOps *ops,
1480 void *opaque,
1481 const char *name,
1482 uint64_t size)
1483{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001484 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001485 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001486 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001487 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001488}
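
/*
 * Illustrative sketch of memory_region_init_io(): a single 32-bit MMIO
 * register whose accesses go through hypothetical demo_reg_ops.  The
 * names and the 0x1000 offset are made up; guarded out as an example.
 */
#if 0
static uint64_t demo_reg_read(void *opaque, hwaddr addr, unsigned size)
{
    uint32_t *reg = opaque;

    return *reg;
}

static void demo_reg_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    uint32_t *reg = opaque;

    *reg = val;
}

static const MemoryRegionOps demo_reg_ops = {
    .read = demo_reg_read,
    .write = demo_reg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void demo_map_reg(MemoryRegion *parent, Object *owner, uint32_t *reg)
{
    MemoryRegion *mr = g_new0(MemoryRegion, 1);

    memory_region_init_io(mr, owner, &demo_reg_ops, reg, "demo-reg", 4);
    memory_region_add_subregion(parent, 0x1000, mr);
}
#endif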
1489
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001490void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1491 Object *owner,
1492 const char *name,
1493 uint64_t size,
1494 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001495{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001496 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001497 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001498 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001499 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001500 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001501 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001502}
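
/*
 * Illustrative sketch: a caller of the _nomigrate variant above must make
 * the RAM migratable itself, e.g. with vmstate_register_ram().  The
 * demo_* wiring is hypothetical; guarded out as an example.
 */
#if 0
static void demo_init_ram(MemoryRegion *mr, DeviceState *dev)
{
    memory_region_init_ram_nomigrate(mr, OBJECT(dev), "demo-ram",
                                     64 * 1024, &error_fatal);
    vmstate_register_ram(mr, dev);
}
#endif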
1503
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001504void memory_region_init_resizeable_ram(MemoryRegion *mr,
1505 Object *owner,
1506 const char *name,
1507 uint64_t size,
1508 uint64_t max_size,
1509 void (*resized)(const char*,
1510 uint64_t length,
1511 void *host),
1512 Error **errp)
1513{
1514 memory_region_init(mr, owner, name, size);
1515 mr->ram = true;
1516 mr->terminates = true;
1517 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001518 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1519 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001520 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001521}
1522
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001523#ifdef __linux__
1524void memory_region_init_ram_from_file(MemoryRegion *mr,
1525 struct Object *owner,
1526 const char *name,
1527 uint64_t size,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001528 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001529 const char *path,
1530 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001531{
1532 memory_region_init(mr, owner, name, size);
1533 mr->ram = true;
1534 mr->terminates = true;
1535 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001536 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001537 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001538}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001539
1540void memory_region_init_ram_from_fd(MemoryRegion *mr,
1541 struct Object *owner,
1542 const char *name,
1543 uint64_t size,
1544 bool share,
1545 int fd,
1546 Error **errp)
1547{
1548 memory_region_init(mr, owner, name, size);
1549 mr->ram = true;
1550 mr->terminates = true;
1551 mr->destructor = memory_region_destructor_ram;
1552 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1553 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1554}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001555#endif
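
/*
 * Illustrative sketch (Linux-only, like the functions above): placing
 * guest RAM on hugetlbfs via a file backing.  The path and size are made
 * up; guarded out as an example.
 */
#if 0
static void demo_init_hugepage_ram(MemoryRegion *mr, Object *owner,
                                   Error **errp)
{
    memory_region_init_ram_from_file(mr, owner, "demo-huge-ram",
                                     1024 * 1024 * 1024, true /* share */,
                                     "/dev/hugepages/demo", errp);
}
#endif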
1556
Avi Kivity093bc2c2011-07-26 14:26:01 +03001557void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001558 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001559 const char *name,
1560 uint64_t size,
1561 void *ptr)
1562{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001563 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001564 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001565 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001566 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001567 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001568
1569 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1570 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001571 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001572}
1573
Alex Williamson21e00fa2016-10-31 09:53:03 -06001574void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1575 Object *owner,
1576 const char *name,
1577 uint64_t size,
1578 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301579{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001580 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1581 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001582 mr->ops = &ram_device_mem_ops;
1583 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301584}
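
/*
 * Illustrative sketch: a ram_device region wraps memory that is not
 * ordinary guest RAM (for instance a mmap()ed device BAR), so memory API
 * accesses are dispatched through ram_device_mem_ops above rather than a
 * direct memcpy path.  Where bar_mmap comes from is assumed, not shown.
 */
#if 0
static void demo_map_device_bar(MemoryRegion *mr, Object *owner,
                                void *bar_mmap, uint64_t bar_size)
{
    memory_region_init_ram_device_ptr(mr, owner, "demo-bar",
                                      bar_size, bar_mmap);
}
#endif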
1585
Avi Kivity093bc2c2011-07-26 14:26:01 +03001586void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001587 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001588 const char *name,
1589 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001590 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001591 uint64_t size)
1592{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001593 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001594 mr->alias = orig;
1595 mr->alias_offset = offset;
1596}
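
/*
 * Illustrative sketch: aliases make the same backing storage visible at a
 * second guest-physical address, e.g. a shadow of the first megabyte of
 * RAM near the top of the 32-bit space.  Addresses are hypothetical.
 */
#if 0
static void demo_map_low_shadow(MemoryRegion *sysmem, MemoryRegion *ram)
{
    MemoryRegion *shadow = g_new0(MemoryRegion, 1);

    memory_region_init_alias(shadow, NULL, "demo-low-shadow",
                             ram, 0, 0x100000);
    memory_region_add_subregion_overlap(sysmem, 0xfff00000, shadow, 1);
}
#endif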
1597
Peter Maydellb59821a2017-07-07 15:42:50 +01001598void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1599 struct Object *owner,
1600 const char *name,
1601 uint64_t size,
1602 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001603{
1604 memory_region_init(mr, owner, name, size);
1605 mr->ram = true;
1606 mr->readonly = true;
1607 mr->terminates = true;
1608 mr->destructor = memory_region_destructor_ram;
1609 mr->ram_block = qemu_ram_alloc(size, mr, errp);
1610 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1611}
1612
Peter Maydellb59821a2017-07-07 15:42:50 +01001613void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1614 Object *owner,
1615 const MemoryRegionOps *ops,
1616 void *opaque,
1617 const char *name,
1618 uint64_t size,
1619 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001620{
Peter Maydell39e0b032016-07-04 13:06:35 +01001621 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001622 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001623 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001624 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001625 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001626 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001627 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001628 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001629}
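
/*
 * Illustrative sketch: a ROM device reads like RAM but writes land in its
 * ops, which is how flash emulation catches program/erase commands.  The
 * ops, state and size here are hypothetical.
 */
#if 0
static void demo_init_flash(MemoryRegion *mr, Object *owner,
                            const MemoryRegionOps *write_ops,
                            void *state, Error **errp)
{
    memory_region_init_rom_device_nomigrate(mr, owner, write_ops, state,
                                            "demo-flash", 2 * 1024 * 1024,
                                            errp);
    /* memory_region_rom_device_set_romd(mr, false) would route reads
     * through the ops as well while programming is in progress. */
}
#endif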
1630
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001631void memory_region_init_iommu(void *_iommu_mr,
1632 size_t instance_size,
1633 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001634 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001635 const char *name,
1636 uint64_t size)
1637{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001638 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001639 struct MemoryRegion *mr;
1640
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001641 object_initialize(_iommu_mr, instance_size, mrtypename);
1642 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001643 memory_region_do_init(mr, owner, name, size);
1644 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001645 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001646 QLIST_INIT(&iommu_mr->iommu_notify);
1647 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001648}
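
/*
 * Illustrative sketch: memory_region_init_iommu() expects a QOM subtype of
 * TYPE_IOMMU_MEMORY_REGION whose class supplies at least a translate
 * callback.  This identity-mapping demo type is hypothetical.
 */
#if 0
typedef struct DemoIOMMUMemoryRegion {
    IOMMUMemoryRegion parent_obj;
} DemoIOMMUMemoryRegion;

static IOMMUTLBEntry demo_translate(IOMMUMemoryRegion *iommu_mr,
                                    hwaddr addr, IOMMUAccessFlags flag)
{
    return (IOMMUTLBEntry) {
        .target_as = &address_space_memory,
        .iova = addr & TARGET_PAGE_MASK,
        .translated_addr = addr & TARGET_PAGE_MASK,
        .addr_mask = ~TARGET_PAGE_MASK,
        .perm = IOMMU_RW,
    };
}

static void demo_iommu_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = demo_translate;
}

static const TypeInfo demo_iommu_info = {
    .name          = "demo-iommu-memory-region",
    .parent        = TYPE_IOMMU_MEMORY_REGION,
    .instance_size = sizeof(DemoIOMMUMemoryRegion),
    .class_init    = demo_iommu_class_init,
};
#endif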
1649
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001650static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001651{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001652 MemoryRegion *mr = MEMORY_REGION(obj);
1653
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001654 assert(!mr->container);
1655
1656    /* We know the region is not visible in any address space (it
1657     * does not have a container and cannot be a root either, because
1658     * it has no references), so we can blindly clear mr->enabled.
1659     * Calling memory_region_set_enabled instead could trigger a
1660     * transaction and cause an infinite loop.
1661     */
1662 mr->enabled = false;
1663 memory_region_transaction_begin();
1664 while (!QTAILQ_EMPTY(&mr->subregions)) {
1665 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1666 memory_region_del_subregion(mr, subregion);
1667 }
1668 memory_region_transaction_commit();
1669
Avi Kivity545e92e2011-08-08 19:58:48 +03001670 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001671 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001672 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001673 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001674}
1675
Paolo Bonzini803c0812013-05-07 06:59:09 +02001676Object *memory_region_owner(MemoryRegion *mr)
1677{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001678 Object *obj = OBJECT(mr);
1679 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001680}
1681
Paolo Bonzini46637be2013-05-07 09:06:00 +02001682void memory_region_ref(MemoryRegion *mr)
1683{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001684 /* MMIO callbacks most likely will access data that belongs
1685 * to the owner, hence the need to ref/unref the owner whenever
1686 * the memory region is in use.
1687 *
1688 * The memory region is a child of its owner. As long as the
1689 * owner doesn't call unparent itself on the memory region,
1690 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001691 * Memory regions without an owner are supposed to never go away;
1692     * we do not ref/unref them because that would slow down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001693 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001694 if (mr && mr->owner) {
1695 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001696 }
1697}
1698
1699void memory_region_unref(MemoryRegion *mr)
1700{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001701 if (mr && mr->owner) {
1702 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001703 }
1704}
1705
Avi Kivity093bc2c2011-07-26 14:26:01 +03001706uint64_t memory_region_size(MemoryRegion *mr)
1707{
Avi Kivity08dafab2011-10-16 13:19:17 +02001708 if (int128_eq(mr->size, int128_2_64())) {
1709 return UINT64_MAX;
1710 }
1711 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001712}
1713
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001714const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001715{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001716 if (!mr->name) {
1717 ((MemoryRegion *)mr)->name =
1718 object_get_canonical_path_component(OBJECT(mr));
1719 }
Peter Maydell302fa282014-08-19 20:05:46 +01001720 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001721}
1722
Alex Williamson21e00fa2016-10-31 09:53:03 -06001723bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301724{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001725 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301726}
1727
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001728uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001729{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001730 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001731 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001732 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1733 }
1734 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001735}
1736
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001737bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1738{
1739 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1740}
1741
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001742static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001743{
1744 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1745 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001746 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001747
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001748 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001749 flags |= iommu_notifier->notifier_flags;
1750 }
1751
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001752 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1753 imrc->notify_flag_changed(iommu_mr,
1754 iommu_mr->iommu_notify_flags,
1755 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001756 }
1757
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001758 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001759}
1760
Peter Xucdb30812016-09-23 13:02:26 +08001761void memory_region_register_iommu_notifier(MemoryRegion *mr,
1762 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001763{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001764 IOMMUMemoryRegion *iommu_mr;
1765
Jason Wangefcd38c2016-12-30 18:09:17 +08001766 if (mr->alias) {
1767 memory_region_register_iommu_notifier(mr->alias, n);
1768 return;
1769 }
1770
Peter Xucdb30812016-09-23 13:02:26 +08001771 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001772 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001773 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001774 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001775 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1776 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001777}
1778
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001779uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001780{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001781 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1782
1783 if (imrc->get_min_page_size) {
1784 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001785 }
1786 return TARGET_PAGE_SIZE;
1787}
1788
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001789void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001790{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001791 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001792 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001793 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001794 IOMMUTLBEntry iotlb;
1795
Peter Xufaa362e2017-04-07 18:59:11 +08001796 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001797 if (imrc->replay) {
1798 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001799 return;
1800 }
1801
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001802 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001803
David Gibsona788f222015-09-30 12:13:55 +10001804 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001805 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001806 if (iotlb.perm != IOMMU_NONE) {
1807 n->notify(n, &iotlb);
1808 }
1809
1810        /* If (2^64 - MR size) < granularity, it's possible to get an
1811         * infinite loop here; this check catches such a wraparound. */
1812 if ((addr + granularity) < addr) {
1813 break;
1814 }
1815 }
1816}
1817
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001818void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001819{
1820 IOMMUNotifier *notifier;
1821
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001822 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1823 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001824 }
1825}
1826
Peter Xucdb30812016-09-23 13:02:26 +08001827void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1828 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001829{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001830 IOMMUMemoryRegion *iommu_mr;
1831
Jason Wangefcd38c2016-12-30 18:09:17 +08001832 if (mr->alias) {
1833 memory_region_unregister_iommu_notifier(mr->alias, n);
1834 return;
1835 }
Peter Xucdb30812016-09-23 13:02:26 +08001836 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001837 iommu_mr = IOMMU_MEMORY_REGION(mr);
1838 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001839}
1840
Peter Xubd2bfa42017-04-07 18:59:10 +08001841void memory_region_notify_one(IOMMUNotifier *notifier,
1842 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001843{
Peter Xucdb30812016-09-23 13:02:26 +08001844 IOMMUNotifierFlag request_flags;
1845
Peter Xubd2bfa42017-04-07 18:59:10 +08001846 /*
1847     * Skip the notification if the notified range does not overlap
1848     * with the registered range.
1849 */
1850    if (notifier->start > entry->iova + entry->addr_mask ||
1851 notifier->end < entry->iova) {
1852 return;
1853 }
Peter Xucdb30812016-09-23 13:02:26 +08001854
Peter Xubd2bfa42017-04-07 18:59:10 +08001855 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001856 request_flags = IOMMU_NOTIFIER_MAP;
1857 } else {
1858 request_flags = IOMMU_NOTIFIER_UNMAP;
1859 }
1860
Peter Xubd2bfa42017-04-07 18:59:10 +08001861 if (notifier->notifier_flags & request_flags) {
1862 notifier->notify(notifier, entry);
1863 }
1864}
1865
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001866void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001867 IOMMUTLBEntry entry)
1868{
1869 IOMMUNotifier *iommu_notifier;
1870
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001871 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001872
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001873 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001874 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001875 }
David Gibson06866572013-05-14 19:13:56 +10001876}
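
/*
 * Illustrative sketch: a consumer hooks the notifications fired above by
 * registering an IOMMUNotifier.  iommu_notifier_init() is assumed to be
 * available from "exec/memory.h"; the demo_* names are hypothetical.
 */
#if 0
static void demo_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    /* [entry->iova, entry->iova + entry->addr_mask] was invalidated */
}

static void demo_watch_iommu(IOMMUMemoryRegion *iommu_mr)
{
    static IOMMUNotifier n;

    iommu_notifier_init(&n, demo_unmap_notify, IOMMU_NOTIFIER_UNMAP,
                        0, HWADDR_MAX);
    memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
}
#endif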
1877
Avi Kivity093bc2c2011-07-26 14:26:01 +03001878void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1879{
Avi Kivity5a583342011-07-26 14:26:02 +03001880 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001881 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001882
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001883 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001884 old_logging = mr->vga_logging_count;
1885 mr->vga_logging_count += log ? 1 : -1;
1886 if (!!old_logging == !!mr->vga_logging_count) {
1887 return;
1888 }
1889
Jan Kiszka59023ef2012-08-23 13:02:30 +02001890 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001891 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001892 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001893 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001894}
1895
Avi Kivitya8170e52012-10-23 12:30:10 +02001896bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1897 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001898{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001899 assert(mr->ram_block);
1900 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1901 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001902}
1903
Avi Kivitya8170e52012-10-23 12:30:10 +02001904void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1905 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001906{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001907 assert(mr->ram_block);
1908 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1909 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001910 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001911}
1912
Juan Quintela6c279db2012-10-17 20:24:28 +02001913bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1914 hwaddr size, unsigned client)
1915{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001916 assert(mr->ram_block);
1917 return cpu_physical_memory_test_and_clear_dirty(
1918 memory_region_get_ram_addr(mr) + addr, size, client);
Juan Quintela6c279db2012-10-17 20:24:28 +02001919}
1920
Gerd Hoffmann8deaf122017-04-21 11:16:25 +02001921DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1922 hwaddr addr,
1923 hwaddr size,
1924 unsigned client)
1925{
1926 assert(mr->ram_block);
1927 return cpu_physical_memory_snapshot_and_clear_dirty(
1928 memory_region_get_ram_addr(mr) + addr, size, client);
1929}
1930
1931bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1932 hwaddr addr, hwaddr size)
1933{
1934 assert(mr->ram_block);
1935 return cpu_physical_memory_snapshot_get_dirty(snap,
1936 memory_region_get_ram_addr(mr) + addr, size);
1937}
Juan Quintela6c279db2012-10-17 20:24:28 +02001938
Avi Kivity093bc2c2011-07-26 14:26:01 +03001939void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1940{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001941 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001942 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001943 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001944 FlatRange *fr;
1945
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001946 /* If the same address space has multiple log_sync listeners, we
1947 * visit that address space's FlatView multiple times. But because
1948     * log_sync listeners are rare, it's still cheaper than walking every
1949     * address space unconditionally.
1950 */
1951 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1952 if (!listener->log_sync) {
1953 continue;
1954 }
1955 as = listener->address_space;
1956 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001957 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity0d673e32012-10-02 15:28:50 +02001958 if (fr->mr == mr) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10001959 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001960 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02001961 }
Avi Kivity5a583342011-07-26 14:26:02 +03001962 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001963 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03001964 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001965}
1966
1967void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1968{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001969 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001970 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001971 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01001972 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001973 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001974 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001975}
1976
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001977void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001978{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001979 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001980 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001981 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01001982 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001983 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001984 }
1985}
1986
Avi Kivitya8170e52012-10-23 12:30:10 +02001987void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1988 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001989{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001990 assert(mr->ram_block);
1991 cpu_physical_memory_test_and_clear_dirty(
1992 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001993}
1994
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001995int memory_region_get_fd(MemoryRegion *mr)
1996{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001997 int fd;
1998
1999 rcu_read_lock();
2000 while (mr->alias) {
2001 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002002 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002003 fd = mr->ram_block->fd;
2004 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002005
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002006 return fd;
2007}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002008
Avi Kivity093bc2c2011-07-26 14:26:01 +03002009void *memory_region_get_ram_ptr(MemoryRegion *mr)
2010{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002011 void *ptr;
2012 uint64_t offset = 0;
2013
2014 rcu_read_lock();
2015 while (mr->alias) {
2016 offset += mr->alias_offset;
2017 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002018 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002019 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002020 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002021 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002022
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002023 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002024}
2025
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002026MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2027{
2028 RAMBlock *block;
2029
2030 block = qemu_ram_block_from_host(ptr, false, offset);
2031 if (!block) {
2032 return NULL;
2033 }
2034
2035 return block->mr;
2036}
2037
Fam Zheng7ebb2742016-03-01 14:18:20 +08002038ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2039{
2040 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2041}
2042
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002043void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2044{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002045 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002046
Gongleifa53a0e2016-05-10 10:04:59 +08002047 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002048}
2049
Avi Kivity0d673e32012-10-02 15:28:50 +02002050static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002051{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002052 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002053 FlatRange *fr;
2054 CoalescedMemoryRange *cmr;
2055 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02002056 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002057
Paolo Bonzini856d7242013-05-06 11:57:21 +02002058 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002059 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002060 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02002061 section = (MemoryRegionSection) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002062 .fv = view,
Avi Kivity95d29942012-10-02 18:21:54 +02002063 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002064 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02002065 };
2066
Paolo Bonzini9a546352016-09-22 16:23:06 +02002067 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002068 int128_get64(fr->addr.start),
2069 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002070 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2071 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02002072 int128_sub(fr->addr.start,
2073 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002074 if (!addrrange_intersects(tmp, fr->addr)) {
2075 continue;
2076 }
2077 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002078 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002079 int128_get64(tmp.start),
2080 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002081 }
2082 }
2083 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002084 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002085}
2086
Avi Kivity0d673e32012-10-02 15:28:50 +02002087static void memory_region_update_coalesced_range(MemoryRegion *mr)
2088{
2089 AddressSpace *as;
2090
2091 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2092 memory_region_update_coalesced_range_as(mr, as);
2093 }
2094}
2095
Avi Kivity093bc2c2011-07-26 14:26:01 +03002096void memory_region_set_coalescing(MemoryRegion *mr)
2097{
2098 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002099 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002100}
2101
2102void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002103 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002104 uint64_t size)
2105{
Anthony Liguori7267c092011-08-20 22:09:37 -05002106 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002107
Avi Kivity08dafab2011-10-16 13:19:17 +02002108 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002109 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2110 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02002111 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002112}
2113
2114void memory_region_clear_coalescing(MemoryRegion *mr)
2115{
2116 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002117 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002118
Jan Kiszkad4105152012-08-23 13:02:29 +02002119 qemu_flush_coalesced_mmio_buffer();
2120 mr->flush_coalesced_mmio = false;
2121
Avi Kivity093bc2c2011-07-26 14:26:01 +03002122 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2123 cmr = QTAILQ_FIRST(&mr->coalesced);
2124 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002125 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002126 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002127 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002128
2129 if (updated) {
2130 memory_region_update_coalesced_range(mr);
2131 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002132}
2133
Jan Kiszkad4105152012-08-23 13:02:29 +02002134void memory_region_set_flush_coalesced(MemoryRegion *mr)
2135{
2136 mr->flush_coalesced_mmio = true;
2137}
2138
2139void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2140{
2141 qemu_flush_coalesced_mmio_buffer();
2142 if (QTAILQ_EMPTY(&mr->coalesced)) {
2143 mr->flush_coalesced_mmio = false;
2144 }
2145}
2146
Jan Kiszka196ea132015-06-18 18:47:20 +02002147void memory_region_set_global_locking(MemoryRegion *mr)
2148{
2149 mr->global_locking = true;
2150}
2151
2152void memory_region_clear_global_locking(MemoryRegion *mr)
2153{
2154 mr->global_locking = false;
2155}
2156
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002157static bool userspace_eventfd_warning;
2158
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002159void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002160 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002161 unsigned size,
2162 bool match_data,
2163 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002164 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002165{
2166 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002167 .addr.start = int128_make64(addr),
2168 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002169 .match_data = match_data,
2170 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002171 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002172 };
2173 unsigned i;
2174
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002175 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2176 userspace_eventfd_warning))) {
2177 userspace_eventfd_warning = true;
2178 error_report("Using eventfd without MMIO binding in KVM. "
2179 "Suboptimal performance expected");
2180 }
2181
Jason Wangb8aecea2015-11-06 16:02:45 +08002182 if (size) {
2183 adjust_endianness(mr, &mrfd.data, size);
2184 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002185 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002186 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2187 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2188 break;
2189 }
2190 }
2191 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002192 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002193 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2194 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2195 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2196 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002197 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002198 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002199}
2200
2201void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002202 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002203 unsigned size,
2204 bool match_data,
2205 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002206 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002207{
2208 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002209 .addr.start = int128_make64(addr),
2210 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002211 .match_data = match_data,
2212 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002213 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002214 };
2215 unsigned i;
2216
Jason Wangb8aecea2015-11-06 16:02:45 +08002217 if (size) {
2218 adjust_endianness(mr, &mrfd.data, size);
2219 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002220 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002221 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2222 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2223 break;
2224 }
2225 }
2226 assert(i != mr->ioeventfd_nb);
2227 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2228 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2229 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002230 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002231 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002232 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002233 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002234}
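
/*
 * Illustrative sketch: a virtio-style doorbell.  A 2-byte guest write of
 * the value 7 at offset 0x10 signals the EventNotifier directly instead
 * of returning to the main loop.  Offset and value are made up.
 */
#if 0
static void demo_arm_doorbell(MemoryRegion *mr, EventNotifier *e)
{
    event_notifier_init(e, 0);
    memory_region_add_eventfd(mr, 0x10, 2, true /* match_data */, 7, e);
}
#endif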
2235
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002236static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002237{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002238 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002239 MemoryRegion *other;
2240
Jan Kiszka59023ef2012-08-23 13:02:30 +02002241 memory_region_transaction_begin();
2242
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002243 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002244 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002245 if (subregion->priority >= other->priority) {
2246 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2247 goto done;
2248 }
2249 }
2250 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2251done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002252 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002253 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002254}
2255
Peter Crosthwaite05987012014-06-05 23:14:44 -07002256static void memory_region_add_subregion_common(MemoryRegion *mr,
2257 hwaddr offset,
2258 MemoryRegion *subregion)
2259{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002260 assert(!subregion->container);
2261 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002262 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002263 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002264}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002265
2266void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002267 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002268 MemoryRegion *subregion)
2269{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002270 subregion->priority = 0;
2271 memory_region_add_subregion_common(mr, offset, subregion);
2272}
2273
2274void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002275 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002276 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002277 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002278{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002279 subregion->priority = priority;
2280 memory_region_add_subregion_common(mr, offset, subregion);
2281}
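
/*
 * Illustrative sketch: where subregions overlap, the higher priority wins,
 * so a small window can be punched into RAM that covers the whole space.
 * Addresses and priorities are hypothetical.
 */
#if 0
static void demo_overlap(MemoryRegion *sysmem, MemoryRegion *ram,
                         MemoryRegion *mmio_window)
{
    memory_region_add_subregion_overlap(sysmem, 0, ram, 0);
    memory_region_add_subregion_overlap(sysmem, 0xfee00000,
                                        mmio_window, 1 /* wins over RAM */);
}
#endif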
2282
2283void memory_region_del_subregion(MemoryRegion *mr,
2284 MemoryRegion *subregion)
2285{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002286 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002287 assert(subregion->container == mr);
2288 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002289 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002290 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002291 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002292 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002293}
2294
2295void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2296{
2297 if (enabled == mr->enabled) {
2298 return;
2299 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002300 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002301 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002302 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002303 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002304}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002305
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002306void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2307{
2308 Int128 s = int128_make64(size);
2309
2310 if (size == UINT64_MAX) {
2311 s = int128_2_64();
2312 }
2313 if (int128_eq(s, mr->size)) {
2314 return;
2315 }
2316 memory_region_transaction_begin();
2317 mr->size = s;
2318 memory_region_update_pending = true;
2319 memory_region_transaction_commit();
2320}
2321
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002322static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002323{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002324 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002325
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002326 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002327 memory_region_transaction_begin();
2328 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002329 memory_region_del_subregion(container, mr);
2330 mr->container = container;
2331 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002332 memory_region_unref(mr);
2333 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002334 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002335}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002336
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002337void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2338{
2339 if (addr != mr->addr) {
2340 mr->addr = addr;
2341 memory_region_readd_subregion(mr);
2342 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002343}
2344
Avi Kivitya8170e52012-10-23 12:30:10 +02002345void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002346{
Avi Kivity47033592011-12-04 19:16:50 +02002347 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002348
Jan Kiszka59023ef2012-08-23 13:02:30 +02002349 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002350 return;
2351 }
2352
Jan Kiszka59023ef2012-08-23 13:02:30 +02002353 memory_region_transaction_begin();
2354 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002355 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002356 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002357}
2358
Igor Mammedova2b257d2014-10-31 16:38:37 +00002359uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2360{
2361 return mr->align;
2362}
2363
Avi Kivitye2177952011-12-08 15:00:18 +02002364static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2365{
2366 const AddrRange *addr = addr_;
2367 const FlatRange *fr = fr_;
2368
2369 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2370 return -1;
2371 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2372 return 1;
2373 }
2374 return 0;
2375}
2376
Paolo Bonzini99e86342013-05-06 10:26:13 +02002377static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002378{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002379 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002380 sizeof(FlatRange), cmp_flatrange_addr);
2381}
2382
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002383bool memory_region_is_mapped(MemoryRegion *mr)
2384{
2385    return mr->container != NULL;
2386}
2387
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002388/* Same as memory_region_find, but it does not add a reference to the
2389 * returned region. It must be called from an RCU critical section.
2390 */
2391static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2392 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002393{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002394 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002395 MemoryRegion *root;
2396 AddressSpace *as;
2397 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002398 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002399 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002400
Paolo Bonzini73034e92013-05-07 15:48:28 +02002401 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002402 for (root = mr; root->container; ) {
2403 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002404 addr += root->addr;
2405 }
2406
2407 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002408 if (!as) {
2409 return ret;
2410 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002411 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002412
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002413 view = address_space_to_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002414 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002415 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002416 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002417 }
2418
Paolo Bonzini99e86342013-05-06 10:26:13 +02002419 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002420 --fr;
2421 }
2422
2423 ret.mr = fr->mr;
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002424 ret.fv = view;
Avi Kivitye2177952011-12-08 15:00:18 +02002425 range = addrrange_intersection(range, fr->addr);
2426 ret.offset_within_region = fr->offset_in_region;
2427 ret.offset_within_region += int128_get64(int128_sub(range.start,
2428 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002429 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002430 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002431 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002432 return ret;
2433}
2434
2435MemoryRegionSection memory_region_find(MemoryRegion *mr,
2436 hwaddr addr, uint64_t size)
2437{
2438 MemoryRegionSection ret;
2439 rcu_read_lock();
2440 ret = memory_region_find_rcu(mr, addr, size);
2441 if (ret.mr) {
2442 memory_region_ref(ret.mr);
2443 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002444 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002445 return ret;
2446}
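
/*
 * Illustrative sketch: memory_region_find() returns a referenced region,
 * so callers drop the reference once done with the section.
 */
#if 0
static bool demo_is_ram_at(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(root, addr, 1);
    bool ret = sec.mr && memory_region_is_ram(sec.mr);

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return ret;
}
#endif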
2447
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002448bool memory_region_present(MemoryRegion *container, hwaddr addr)
2449{
2450 MemoryRegion *mr;
2451
2452 rcu_read_lock();
2453 mr = memory_region_find_rcu(container, addr, 1).mr;
2454 rcu_read_unlock();
2455 return mr && mr != container;
2456}
2457
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002458void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002459{
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002460 MemoryListener *listener;
2461 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002462 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002463 FlatRange *fr;
2464
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002465 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2466 if (!listener->log_sync) {
2467 continue;
2468 }
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002469 as = listener->address_space;
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002470 view = address_space_get_flatview(as);
2471 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonziniadaad612016-09-22 16:09:08 +02002472 if (fr->dirty_log_mask) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002473 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2474
Paolo Bonziniadaad612016-09-22 16:09:08 +02002475 listener->log_sync(listener, &mrs);
2476 }
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002477 }
2478 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002479 }
2480}
2481
Jay Zhou19310762017-07-28 18:28:53 +08002482static VMChangeStateEntry *vmstate_change;
2483
Avi Kivity7664e802011-12-11 14:47:25 +02002484void memory_global_dirty_log_start(void)
2485{
Jay Zhou19310762017-07-28 18:28:53 +08002486 if (vmstate_change) {
2487 qemu_del_vm_change_state_handler(vmstate_change);
2488 vmstate_change = NULL;
2489 }
2490
Avi Kivity7664e802011-12-11 14:47:25 +02002491 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002492
Avi Kivity7376e582012-02-08 21:05:17 +02002493 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002494
2495 /* Refresh DIRTY_LOG_MIGRATION bit. */
2496 memory_region_transaction_begin();
2497 memory_region_update_pending = true;
2498 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002499}
2500
Jay Zhou19310762017-07-28 18:28:53 +08002501static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002502{
Avi Kivity7664e802011-12-11 14:47:25 +02002503 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002504
2505 /* Refresh DIRTY_LOG_MIGRATION bit. */
2506 memory_region_transaction_begin();
2507 memory_region_update_pending = true;
2508 memory_region_transaction_commit();
2509
Avi Kivity7376e582012-02-08 21:05:17 +02002510 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002511}
2512
Jay Zhou19310762017-07-28 18:28:53 +08002513static void memory_vm_change_state_handler(void *opaque, int running,
2514 RunState state)
2515{
2516 if (running) {
2517 memory_global_dirty_log_do_stop();
2518
2519 if (vmstate_change) {
2520 qemu_del_vm_change_state_handler(vmstate_change);
2521 vmstate_change = NULL;
2522 }
2523 }
2524}
2525
2526void memory_global_dirty_log_stop(void)
2527{
2528 if (!runstate_is_running()) {
2529 if (vmstate_change) {
2530 return;
2531 }
2532 vmstate_change = qemu_add_vm_change_state_handler(
2533 memory_vm_change_state_handler, NULL);
2534 return;
2535 }
2536
2537 memory_global_dirty_log_do_stop();
2538}
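
/*
 * Design note on the deferral above: tearing down dirty logging makes
 * KVM rebuild its memory slots, which is comparatively expensive.  If
 * the stop request arrives while the VM is stopped (e.g. at the end of
 * migration), doing that work immediately would lengthen the downtime,
 * so it is postponed until the VM runs again via the change-state
 * handler.  A pending deferral is cancelled if dirty logging is started
 * again first (see memory_global_dirty_log_start() above).
 */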
2539
Avi Kivity7664e802011-12-11 14:47:25 +02002540static void listener_add_address_space(MemoryListener *listener,
2541 AddressSpace *as)
2542{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002543 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002544 FlatRange *fr;
2545
Paolo Bonzini680a4782015-11-02 09:23:52 +01002546 if (listener->begin) {
2547 listener->begin(listener);
2548 }
Avi Kivity7664e802011-12-11 14:47:25 +02002549 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002550 if (listener->log_global_start) {
2551 listener->log_global_start(listener);
2552 }
Avi Kivity7664e802011-12-11 14:47:25 +02002553 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002554
Paolo Bonzini856d7242013-05-06 11:57:21 +02002555 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002556 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity7664e802011-12-11 14:47:25 +02002557 MemoryRegionSection section = {
2558 .mr = fr->mr,
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002559 .fv = view,
Avi Kivity7664e802011-12-11 14:47:25 +02002560 .offset_within_region = fr->offset_in_region,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002561 .size = fr->addr.size,
Avi Kivity7664e802011-12-11 14:47:25 +02002562 .offset_within_address_space = int128_get64(fr->addr.start),
Avi Kivity7a8499e2012-02-08 17:01:23 +02002563 .readonly = fr->readonly,
Avi Kivity7664e802011-12-11 14:47:25 +02002564 };
Paolo Bonzini680a4782015-11-02 09:23:52 +01002565 if (fr->dirty_log_mask && listener->log_start) {
2566 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2567 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002568 if (listener->region_add) {
2569 listener->region_add(listener, &section);
2570 }
Avi Kivity7664e802011-12-11 14:47:25 +02002571 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002572 if (listener->commit) {
2573 listener->commit(listener);
2574 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002575 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002576}
2577
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002578void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002579{
Avi Kivity72e22d22012-02-08 15:05:50 +02002580 MemoryListener *other = NULL;
2581
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002582 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002583 if (QTAILQ_EMPTY(&memory_listeners)
2584 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2585 memory_listeners)->priority) {
2586 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2587 } else {
2588 QTAILQ_FOREACH(other, &memory_listeners, link) {
2589 if (listener->priority < other->priority) {
2590 break;
2591 }
2592 }
2593 QTAILQ_INSERT_BEFORE(other, listener, link);
2594 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002595
Paolo Bonzini9a546352016-09-22 16:23:06 +02002596 if (QTAILQ_EMPTY(&as->listeners)
2597 || listener->priority >= QTAILQ_LAST(&as->listeners,
2598 memory_listeners)->priority) {
2599 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2600 } else {
2601 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2602 if (listener->priority < other->priority) {
2603 break;
2604 }
2605 }
2606 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2607 }
2608
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002609 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002610}
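
/*
 * A minimal registration sketch (names are illustrative): only the
 * callbacks a listener cares about need to be set; every hook is
 * checked for NULL before being invoked.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         // inspect section->mr, section->offset_within_address_space,
 *         // section->size, ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,    // higher values are called later on Forward
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */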
2611
2612void memory_listener_unregister(MemoryListener *listener)
2613{
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002614 if (!listener->address_space) {
2615 return;
2616 }
2617
Avi Kivity72e22d22012-02-08 15:05:50 +02002618 QTAILQ_REMOVE(&memory_listeners, listener, link);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002619 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002620 listener->address_space = NULL;
Avi Kivity86e775c2011-12-15 16:24:49 +02002621}
Avi Kivitye2177952011-12-08 15:00:18 +02002622
KONRAD Fredericc9356742016-10-19 15:06:49 +02002623bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2624{
2625 void *host;
2626 unsigned size = 0;
2627 unsigned offset = 0;
2628 Object *new_interface;
2629
2630 if (!mr || !mr->ops->request_ptr) {
2631 return false;
2632 }
2633
2634 /*
2635     * Avoid an update if the request_ptr callback calls
2636     * memory_region_invalidate_mmio_ptr(), which seems likely when we
2637     * use a cache.
2638 */
2639 memory_region_transaction_begin();
2640
2641 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2642
2643 if (!host || !size) {
2644 memory_region_transaction_commit();
2645 return false;
2646 }
2647
2648 new_interface = object_new("mmio_interface");
2649 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
2650 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
2651 qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
2652 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
2653 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
2654 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
2655
2656 memory_region_transaction_commit();
2657 return true;
2658}
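
/*
 * A device opts in by implementing the request_ptr hook in its
 * MemoryRegionOps (sketch; the device type and window are hypothetical):
 *
 *     static void *mydev_request_ptr(void *opaque, hwaddr addr,
 *                                    unsigned *size, unsigned *offset)
 *     {
 *         MyDevState *s = opaque;
 *
 *         *size = MYDEV_WINDOW_SIZE;  // length of the mappable window
 *         *offset = 0;                // guest offset of the window in mr
 *         return s->window;           // host pointer backing the window
 *     }
 *
 * Returning NULL (or a zero size) keeps the region on the normal MMIO
 * path.
 */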
2659
2660typedef struct MMIOPtrInvalidate {
2661 MemoryRegion *mr;
2662 hwaddr offset;
2663 unsigned size;
2664 int busy;
2665 int allocated;
2666} MMIOPtrInvalidate;
2667
2668#define MAX_MMIO_INVALIDATE 10
2669static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
2670
2671static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
2672 run_on_cpu_data data)
2673{
2674 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
2675 MemoryRegion *mr = invalidate_data->mr;
2676 hwaddr offset = invalidate_data->offset;
2677 unsigned size = invalidate_data->size;
2678 MemoryRegionSection section = memory_region_find(mr, offset, size);
2679
2680 qemu_mutex_lock_iothread();
2681
2682    /* Reset the dirty status so this invalidation doesn't retrigger later. */
2683 cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
2684
2685 if (section.mr != mr) {
2686        /* memory_region_find adds a ref on section.mr */
2687 memory_region_unref(section.mr);
2688 if (MMIO_INTERFACE(section.mr->owner)) {
2689            /* We found the interface, so just drop it. */
2690 object_property_set_bool(section.mr->owner, false, "realized",
2691 NULL);
2692 object_unref(section.mr->owner);
2693 object_unparent(section.mr->owner);
2694 }
2695 }
2696
2697 qemu_mutex_unlock_iothread();
2698
2699 if (invalidate_data->allocated) {
2700 g_free(invalidate_data);
2701 } else {
2702 invalidate_data->busy = 0;
2703 }
2704}
2705
2706void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
2707 unsigned size)
2708{
2709 size_t i;
2710 MMIOPtrInvalidate *invalidate_data = NULL;
2711
2712 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
2713 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
2714 invalidate_data = &mmio_ptr_invalidate_list[i];
2715 break;
2716 }
2717 }
2718
2719 if (!invalidate_data) {
2720 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
2721 invalidate_data->allocated = 1;
2722 }
2723
2724 invalidate_data->mr = mr;
2725 invalidate_data->offset = offset;
2726 invalidate_data->size = size;
2727
2728 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
2729 RUN_ON_CPU_HOST_PTR(invalidate_data));
2730}
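
/*
 * Design note: the fixed mmio_ptr_invalidate_list pool lets the common
 * case avoid heap allocation; atomic_cmpxchg() on ->busy claims a free
 * slot without locking.  Only when all MAX_MMIO_INVALIDATE slots are in
 * flight do we fall back to g_malloc0(), and those entries free
 * themselves in memory_region_do_invalidate_mmio_ptr() via ->allocated.
 */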
2731
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002732void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002733{
Paolo Bonziniac951902015-02-11 15:21:04 +01002734 memory_region_ref(root);
Avi Kivity8786db72012-10-02 13:53:41 +02002735 as->root = root;
Alexey Kardashevskiy67ace392017-09-21 18:51:05 +10002736 as->current_map = NULL;
Avi Kivity4c19eb72012-10-30 13:47:44 +02002737 as->ioeventfd_nb = 0;
2738 as->ioeventfds = NULL;
Paolo Bonzini9a546352016-09-22 16:23:06 +02002739 QTAILQ_INIT(&as->listeners);
Avi Kivity0d673e32012-10-02 15:28:50 +02002740 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002741 as->name = g_strdup(name ? name : "anonymous");
Alexey Kardashevskiy202fc012017-09-21 18:51:09 +10002742 address_space_update_topology(as);
2743 address_space_update_ioeventfds(as);
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002744}
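
/*
 * A minimal usage sketch (root_mr and the name are illustrative): an
 * AddressSpace is a live view rooted at a memory region, e.g. one per
 * DMA-capable device.
 *
 *     AddressSpace as;
 *
 *     address_space_init(&as, root_mr, "mydev-dma");
 *     ...
 *     address_space_destroy(&as);
 */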
Avi Kivity658b2222011-07-26 14:26:08 +03002745
Paolo Bonzini374f2982013-05-17 12:37:03 +02002746static void do_address_space_destroy(AddressSpace *as)
Avi Kivity83f3c252012-10-07 12:59:55 +02002747{
Paolo Bonzini9a546352016-09-22 16:23:06 +02002748 assert(QTAILQ_EMPTY(&as->listeners));
David Gibson078c44f2014-05-30 12:59:00 -06002749
Paolo Bonzini856d7242013-05-06 11:57:21 +02002750 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002751 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002752 g_free(as->ioeventfds);
Paolo Bonziniac951902015-02-11 15:21:04 +01002753 memory_region_unref(as->root);
Avi Kivity83f3c252012-10-07 12:59:55 +02002754}
2755
Paolo Bonzini374f2982013-05-17 12:37:03 +02002756void address_space_destroy(AddressSpace *as)
2757{
Paolo Bonziniac951902015-02-11 15:21:04 +01002758 MemoryRegion *root = as->root;
2759
Paolo Bonzini374f2982013-05-17 12:37:03 +02002760 /* Flush out anything from MemoryListeners listening in on this */
2761 memory_region_transaction_begin();
2762 as->root = NULL;
2763 memory_region_transaction_commit();
2764 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2765
2766 /* At this point, as->dispatch and as->current_map are dummy
2767 * entries that the guest should never use. Wait for the old
2768 * values to expire before freeing the data.
2769 */
Paolo Bonziniac951902015-02-11 15:21:04 +01002770 as->root = root;
Paolo Bonzini374f2982013-05-17 12:37:03 +02002771 call_rcu(as, do_address_space_destroy, rcu);
2772}
2773
Peter Xu4e831902017-01-16 16:40:04 +08002774static const char *memory_region_type(MemoryRegion *mr)
2775{
2776 if (memory_region_is_ram_device(mr)) {
2777 return "ramd";
2778 } else if (memory_region_is_romd(mr)) {
2779 return "romd";
2780 } else if (memory_region_is_rom(mr)) {
2781 return "rom";
2782 } else if (memory_region_is_ram(mr)) {
2783 return "ram";
2784 } else {
2785 return "i/o";
2786 }
2787}
2788
Blue Swirl314e2982011-09-11 20:22:05 +00002789typedef struct MemoryRegionList MemoryRegionList;
2790
2791struct MemoryRegionList {
2792 const MemoryRegion *mr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002793 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
Blue Swirl314e2982011-09-11 20:22:05 +00002794};
2795
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002796typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
Blue Swirl314e2982011-09-11 20:22:05 +00002797
Peter Xu4e831902017-01-16 16:40:04 +08002798#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2799 int128_sub((size), int128_one())) : 0)
2800#define MTREE_INDENT " "
2801
Blue Swirl314e2982011-09-11 20:22:05 +00002802static void mtree_print_mr(fprintf_function mon_printf, void *f,
2803 const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02002804 hwaddr base,
Jan Kiszka9479c572011-09-27 15:00:41 +02002805 MemoryRegionListHead *alias_print_queue)
Blue Swirl314e2982011-09-11 20:22:05 +00002806{
Jan Kiszka9479c572011-09-27 15:00:41 +02002807 MemoryRegionList *new_ml, *ml, *next_ml;
2808 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00002809 const MemoryRegion *submr;
2810 unsigned int i;
Peter Xub31f8412017-03-14 20:56:27 +08002811 hwaddr cur_start, cur_end;
Blue Swirl314e2982011-09-11 20:22:05 +00002812
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002813 if (!mr) {
Blue Swirl314e2982011-09-11 20:22:05 +00002814 return;
2815 }
2816
2817 for (i = 0; i < level; i++) {
Peter Xu4e831902017-01-16 16:40:04 +08002818 mon_printf(f, MTREE_INDENT);
Blue Swirl314e2982011-09-11 20:22:05 +00002819 }
2820
Peter Xub31f8412017-03-14 20:56:27 +08002821 cur_start = base + mr->addr;
2822 cur_end = cur_start + MR_SIZE(mr->size);
2823
2824 /*
2825     * Try to detect overflow of the memory region.  This should never
2826     * happen normally; when it does, print a marker so the user
2827     * inspecting the tree is warned.
2828 */
2829 if (cur_start < base || cur_end < cur_start) {
2830 mon_printf(f, "[DETECTED OVERFLOW!] ");
2831 }
2832
Blue Swirl314e2982011-09-11 20:22:05 +00002833 if (mr->alias) {
2834 MemoryRegionList *ml;
2835 bool found = false;
2836
2837 /* check if the alias is already in the queue */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002838 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
Paolo Bonzinif54bb152013-12-11 12:51:46 +01002839 if (ml->mr == mr->alias) {
Blue Swirl314e2982011-09-11 20:22:05 +00002840 found = true;
2841 }
2842 }
2843
2844 if (!found) {
2845 ml = g_new(MemoryRegionList, 1);
2846 ml->mr = mr->alias;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002847 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
Blue Swirl314e2982011-09-11 20:22:05 +00002848 }
Jan Kiszka4896d742012-02-04 16:25:42 +01002849 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
Peter Xu4e831902017-01-16 16:40:04 +08002850 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002851 "-" TARGET_FMT_plx "%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002852 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002853 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002854 memory_region_type((MemoryRegion *)mr),
Peter Crosthwaite3fb18b42014-08-14 23:55:36 -07002855 memory_region_name(mr),
2856 memory_region_name(mr->alias),
Blue Swirl314e2982011-09-11 20:22:05 +00002857 mr->alias_offset,
Peter Xu4e831902017-01-16 16:40:04 +08002858 mr->alias_offset + MR_SIZE(mr->size),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002859 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002860 } else {
Jan Kiszka4896d742012-02-04 16:25:42 +01002861 mon_printf(f,
Peter Xu4e831902017-01-16 16:40:04 +08002862 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002863 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002864 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002865 memory_region_type((MemoryRegion *)mr),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002866 memory_region_name(mr),
2867 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002868 }
Jan Kiszka9479c572011-09-27 15:00:41 +02002869
2870 QTAILQ_INIT(&submr_print_queue);
2871
Blue Swirl314e2982011-09-11 20:22:05 +00002872 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002873 new_ml = g_new(MemoryRegionList, 1);
2874 new_ml->mr = submr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002875 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002876 if (new_ml->mr->addr < ml->mr->addr ||
2877 (new_ml->mr->addr == ml->mr->addr &&
2878 new_ml->mr->priority > ml->mr->priority)) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002879 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002880 new_ml = NULL;
2881 break;
2882 }
2883 }
2884 if (new_ml) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002885 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002886 }
2887 }
2888
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002889 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Peter Xub31f8412017-03-14 20:56:27 +08002890 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
Jan Kiszka9479c572011-09-27 15:00:41 +02002891 alias_print_queue);
2892 }
2893
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002894 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002895 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002896 }
2897}
2898
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002899struct FlatViewInfo {
2900 fprintf_function mon_printf;
2901 void *f;
2902 int counter;
2903 bool dispatch_tree;
2904};
2905
2906static void mtree_print_flatview(gpointer key, gpointer value,
2907 gpointer user_data)
Peter Xu57bb40c2017-01-16 16:40:05 +08002908{
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002909 FlatView *view = key;
2910 GArray *fv_address_spaces = value;
2911 struct FlatViewInfo *fvi = user_data;
2912 fprintf_function p = fvi->mon_printf;
2913 void *f = fvi->f;
Peter Xu57bb40c2017-01-16 16:40:05 +08002914 FlatRange *range = &view->ranges[0];
2915 MemoryRegion *mr;
2916 int n = view->nr;
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002917 int i;
2918 AddressSpace *as;
2919
2920 p(f, "FlatView #%d\n", fvi->counter);
2921 ++fvi->counter;
2922
2923 for (i = 0; i < fv_address_spaces->len; ++i) {
2924 as = g_array_index(fv_address_spaces, AddressSpace*, i);
2925 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
2926 if (as->root->alias) {
2927 p(f, ", alias %s", memory_region_name(as->root->alias));
2928 }
2929 p(f, "\n");
2930 }
2931
2932 p(f, " Root memory region: %s\n",
2933 view->root ? memory_region_name(view->root) : "(none)");
Peter Xu57bb40c2017-01-16 16:40:05 +08002934
2935 if (n <= 0) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002936 p(f, MTREE_INDENT "No rendered FlatView\n\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08002937 return;
2938 }
2939
2940 while (n--) {
2941 mr = range->mr;
Paolo Bonzini377a07a2017-03-02 22:49:41 +01002942 if (range->offset_in_region) {
2943 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2944 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
2945 int128_get64(range->addr.start),
2946 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2947 mr->priority,
2948 range->readonly ? "rom" : memory_region_type(mr),
2949 memory_region_name(mr),
2950 range->offset_in_region);
2951 } else {
2952 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2953 TARGET_FMT_plx " (prio %d, %s): %s\n",
2954 int128_get64(range->addr.start),
2955 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2956 mr->priority,
2957 range->readonly ? "rom" : memory_region_type(mr),
2958 memory_region_name(mr));
2959 }
Peter Xu57bb40c2017-01-16 16:40:05 +08002960 range++;
2961 }
2962
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002963#if !defined(CONFIG_USER_ONLY)
2964 if (fvi->dispatch_tree && view->root) {
2965 mtree_print_dispatch(p, f, view->dispatch, view->root);
2966 }
2967#endif
2968
2969 p(f, "\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08002970}
2971
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002972static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
2973 gpointer user_data)
2974{
2975 FlatView *view = key;
2976 GArray *fv_address_spaces = value;
2977
2978 g_array_unref(fv_address_spaces);
2979 flatview_unref(view);
2980
2981 return true;
2982}
2983
2984void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
2985 bool dispatch_tree)
Blue Swirl314e2982011-09-11 20:22:05 +00002986{
2987 MemoryRegionListHead ml_head;
2988 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02002989 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00002990
Peter Xu57bb40c2017-01-16 16:40:05 +08002991 if (flatview) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002992 FlatView *view;
2993 struct FlatViewInfo fvi = {
2994 .mon_printf = mon_printf,
2995 .f = f,
2996 .counter = 0,
2997 .dispatch_tree = dispatch_tree
2998 };
2999 GArray *fv_address_spaces;
3000 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3001
3002 /* Gather all FVs in one table */
Peter Xu57bb40c2017-01-16 16:40:05 +08003003 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003004 view = address_space_get_flatview(as);
3005
3006 fv_address_spaces = g_hash_table_lookup(views, view);
3007 if (!fv_address_spaces) {
3008 fv_address_spaces = g_array_new(false, false, sizeof(as));
3009 g_hash_table_insert(views, view, fv_address_spaces);
3010 }
3011
3012 g_array_append_val(fv_address_spaces, as);
Peter Xu57bb40c2017-01-16 16:40:05 +08003013 }
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003014
3015 /* Print */
3016 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3017
3018 /* Free */
3019 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3020 g_hash_table_unref(views);
3021
Peter Xu57bb40c2017-01-16 16:40:05 +08003022 return;
3023 }
3024
Blue Swirl314e2982011-09-11 20:22:05 +00003025 QTAILQ_INIT(&ml_head);
3026
Avi Kivity0d673e32012-10-02 15:28:50 +02003027 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02003028 mon_printf(f, "address-space: %s\n", as->name);
3029 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
3030 mon_printf(f, "\n");
Blue Swirlb9f9be82012-03-10 16:58:35 +00003031 }
3032
Blue Swirl314e2982011-09-11 20:22:05 +00003033 /* print aliased regions */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003034 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02003035 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
3036 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
3037 mon_printf(f, "\n");
Blue Swirl314e2982011-09-11 20:22:05 +00003038 }
3039
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003040 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02003041 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00003042 }
Blue Swirl314e2982011-09-11 20:22:05 +00003043}
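
/*
 * mtree_info() backs the monitor's "info mtree" command; the flatview
 * and dispatch_tree arguments are believed to map to its "-f" and "-d"
 * flags.
 */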
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003044
Peter Maydellb08199c2017-07-07 15:42:51 +01003045void memory_region_init_ram(MemoryRegion *mr,
3046 struct Object *owner,
3047 const char *name,
3048 uint64_t size,
3049 Error **errp)
3050{
3051 DeviceState *owner_dev;
3052 Error *err = NULL;
3053
3054 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3055 if (err) {
3056 error_propagate(errp, err);
3057 return;
3058 }
3059 /* This will assert if owner is neither NULL nor a DeviceState.
3060 * We only want the owner here for the purposes of defining a
3061 * unique name for migration. TODO: Ideally we should implement
3062 * a naming scheme for Objects which are not DeviceStates, in
3063 * which case we can relax this restriction.
3064 */
3065 owner_dev = DEVICE(owner);
3066 vmstate_register_ram(mr, owner_dev);
3067}
3068
3069void memory_region_init_rom(MemoryRegion *mr,
3070 struct Object *owner,
3071 const char *name,
3072 uint64_t size,
3073 Error **errp)
3074{
3075 DeviceState *owner_dev;
3076 Error *err = NULL;
3077
3078 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3079 if (err) {
3080 error_propagate(errp, err);
3081 return;
3082 }
3083 /* This will assert if owner is neither NULL nor a DeviceState.
3084 * We only want the owner here for the purposes of defining a
3085 * unique name for migration. TODO: Ideally we should implement
3086 * a naming scheme for Objects which are not DeviceStates, in
3087 * which case we can relax this restriction.
3088 */
3089 owner_dev = DEVICE(owner);
3090 vmstate_register_ram(mr, owner_dev);
3091}
3092
3093void memory_region_init_rom_device(MemoryRegion *mr,
3094 struct Object *owner,
3095 const MemoryRegionOps *ops,
3096 void *opaque,
3097 const char *name,
3098 uint64_t size,
3099 Error **errp)
3100{
3101 DeviceState *owner_dev;
3102 Error *err = NULL;
3103
3104 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3105 name, size, &err);
3106 if (err) {
3107 error_propagate(errp, err);
3108 return;
3109 }
3110 /* This will assert if owner is neither NULL nor a DeviceState.
3111 * We only want the owner here for the purposes of defining a
3112 * unique name for migration. TODO: Ideally we should implement
3113 * a naming scheme for Objects which are not DeviceStates, in
3114 * which case we can relax this restriction.
3115 */
3116 owner_dev = DEVICE(owner);
3117 vmstate_register_ram(mr, owner_dev);
3118}
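
/*
 * A typical use of these migration-aware wrappers (sketch; MyDevState
 * and the size are hypothetical) is from a device's realize method:
 *
 *     static void mydev_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyDevState *s = MYDEV(dev);
 *
 *         memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                                0x10000, errp);
 *     }
 *
 * Passing OBJECT(dev) as the owner ties the region's lifetime to the
 * device and gives vmstate_register_ram() a unique name for migration.
 */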
3119
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003120static const TypeInfo memory_region_info = {
3121 .parent = TYPE_OBJECT,
3122 .name = TYPE_MEMORY_REGION,
3123 .instance_size = sizeof(MemoryRegion),
3124 .instance_init = memory_region_initfn,
3125 .instance_finalize = memory_region_finalize,
3126};
3127
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003128static const TypeInfo iommu_memory_region_info = {
3129 .parent = TYPE_MEMORY_REGION,
3130 .name = TYPE_IOMMU_MEMORY_REGION,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003131 .class_size = sizeof(IOMMUMemoryRegionClass),
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003132 .instance_size = sizeof(IOMMUMemoryRegion),
3133 .instance_init = iommu_memory_region_initfn,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003134 .abstract = true,
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003135};
3136
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003137static void memory_register_types(void)
3138{
3139 type_register_static(&memory_region_info);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003140 type_register_static(&iommu_memory_region_info);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003141}
3142
3143type_init(memory_register_types)