/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
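
/*
 * Illustration (editorial, not in the original source): intersecting
 * [0x1000, 0x3000) with [0x2000, 0x6000) yields start = 0x2000 and
 * size = 0x1000, i.e. the overlap [0x2000, 0x3000).  Callers check
 * addrrange_intersects() first; a disjoint pair would produce a negative,
 * meaningless size here.
 */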

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)
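
/*
 * Editorial note: callers pick the walk direction per event.  Setup-style
 * callbacks (e.g. region_add) are dispatched Forward while teardown-style
 * callbacks (e.g. region_del) are dispatched Reverse (see
 * address_space_update_topology_pass() below), so listeners are torn down
 * in the opposite order they were brought up.
 */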

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
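
/*
 * Editorial note: memory_region_ioeventfd_before() is a lexicographic
 * "less than" over (addr.start, addr.size, match_data, data, e), so
 * equality falls out as !(a < b) && !(b < a).  The same total order is
 * what allows address_space_add_del_ioeventfds() below to diff two sorted
 * arrays in a single merge-style pass.
 */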

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(void)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        call_rcu(view, flatview_destroy, rcu);
    }
}
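
/*
 * Editorial note: the reference count and RCU cooperate here.  A reader
 * that found a view through an RCU-protected pointer must take a reference
 * with flatview_ref(), which fails (returns false) once the count has
 * dropped to zero; the final flatview_unref() defers the actual
 * destruction past the current grace period via call_rcu(), so lock-free
 * readers never see the FlatView freed under them.
 */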

static FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
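
/*
 * Illustration (editorial): if a 64 KiB RAM region was rendered as two
 * back-to-back FlatRanges [0x0, 0x8000) and [0x8000, 0x10000) with
 * identical attributes and contiguous offset_in_region, the inner loop
 * folds the second range's size into the first and the memmove() drops
 * the now-redundant entry, leaving a single [0x0, 0x10000) range.
 */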

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
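
/*
 * Editorial note: "wrong" endianness means the device's declared byte
 * order differs from the target's, e.g. a DEVICE_LITTLE_ENDIAN model on a
 * TARGET_WORDS_BIGENDIAN build.  In that case every 2-, 4- or 8-byte value
 * is byte-swapped on its way between the bus and the device callbacks;
 * single bytes need no adjustment.
 */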

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
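
/*
 * Worked example (editorial): a 64-bit access to a region whose
 * impl.max_access_size is 4 is split into two 4-byte calls to access_fn.
 * For a little-endian region the pieces are placed at shifts 0 and 32;
 * for a big-endian region the first piece goes to the high half instead
 * ((size - access_size - i) * 8), so the value reassembled in *value
 * matches what a single wide access would have produced.
 */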

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
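
/*
 * Editorial summary of the recursion above: disabled regions render
 * nothing; aliases redirect rendering into their target with the base
 * rebased by alias_offset; subregions are rendered first (the subregion
 * list is kept in descending priority order, so higher-priority children
 * claim their addresses before their parent); and a terminating region
 * then fills only the gaps the already-inserted FlatRanges left behind,
 * which is how earlier, higher-priority ranges "obscure" later ones.
 */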

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = flatview_new();

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = atomic_rcu_read(&as->current_map);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}
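
/*
 * Editorial note: the retry loop covers the window between reading the
 * pointer and taking a reference.  A concurrent commit can swap in a new
 * map and drop the old one's last reference in that window;
 * atomic_fetch_inc_nonzero() then refuses to resurrect the dying view and
 * the loop re-reads the (new) current_map.  rcu_read_lock() keeps the
 * FlatView memory itself valid while this is attempted.
 */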

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);
    int i;

    new_view->dispatch = mem_begin(as);
    for (i = 0; i < new_view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&new_view->ranges[i], as);
        mem_add(as, new_view, &mrs);
    }
    mem_commit(new_view->dispatch);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        address_space_update_topology_pass(as, old_view, new_view, false);
        address_space_update_topology_pass(as, old_view, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    flatview_unref(old_view);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}
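
/*
 * Editorial note: old_view is deliberately unreferenced twice above.  The
 * first flatview_unref() drops the reference that as->current_map itself
 * held; the second drops the local reference taken by
 * address_space_get_flatview() at the top of the function.  When the
 * count reaches zero, destruction is still deferred by RCU inside
 * flatview_unref().
 */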

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
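
/*
 * Usage sketch (editorial, with hypothetical variable names): transactions
 * nest, and only the outermost commit rebuilds the flat views, so callers
 * can batch several layout changes into a single listener update:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(old_bar, false);
 *     memory_region_add_subregion(system_memory, new_base, new_bar);
 *     memory_region_transaction_commit();
 */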

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
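
/*
 * Illustration (editorial): '/', '[', ']' and '\' are meaningful in QOM
 * paths, so a region named "pci/bar[0]" becomes "pci\x2fbar\x5b0\x5d"
 * before being attached as a child property; each escaped byte expands to
 * the four-character sequence \xNN, which is what the 4-vs-1 byte count
 * in the first loop accounts for.
 */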

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
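
/*
 * Editorial note: "ram device" regions are host memory that really belongs
 * to a device (a vfio-mapped PCI BAR, for instance).  When an access
 * reaches the slow MMIO dispatch path instead of a direct mapping, these
 * ops perform one host load or store of exactly the requested width at
 * ram_block->host + addr rather than a byte-wise copy, and
 * DEVICE_HOST_ENDIAN leaves the bytes alone because the backing store is
 * already in host byte order.
 */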

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}
Avi Kivity093bc2c2011-07-26 14:26:01 +03001391void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001392 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001393 const MemoryRegionOps *ops,
1394 void *opaque,
1395 const char *name,
1396 uint64_t size)
1397{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001398 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001399 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001400 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001401 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001402}
1403
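/*
 * Typical usage sketch for memory_region_init_io().  The "foo" device,
 * its state struct and register layout are hypothetical; the API calls
 * and the MemoryRegionOps fields are real:
 *
 *     static uint64_t foo_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         FooState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void foo_write(void *opaque, hwaddr addr,
 *                           uint64_t data, unsigned size)
 *     {
 *         FooState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps foo_ops = {
 *         .read = foo_read,
 *         .write = foo_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &foo_ops, s,
 *                           "foo-regs", 0x1000);
 */
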
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001404void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1405 Object *owner,
1406 const char *name,
1407 uint64_t size,
1408 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001409{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001410 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001411 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001412 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001413 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001414 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001415 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001416}
1417
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001418void memory_region_init_resizeable_ram(MemoryRegion *mr,
1419 Object *owner,
1420 const char *name,
1421 uint64_t size,
1422 uint64_t max_size,
1423 void (*resized)(const char*,
1424 uint64_t length,
1425 void *host),
1426 Error **errp)
1427{
1428 memory_region_init(mr, owner, name, size);
1429 mr->ram = true;
1430 mr->terminates = true;
1431 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001432 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1433 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001434 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001435}
1436
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001437#ifdef __linux__
1438void memory_region_init_ram_from_file(MemoryRegion *mr,
1439 struct Object *owner,
1440 const char *name,
1441 uint64_t size,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001442 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001443 const char *path,
1444 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001445{
1446 memory_region_init(mr, owner, name, size);
1447 mr->ram = true;
1448 mr->terminates = true;
1449 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001450 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001451 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001452}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001453
1454void memory_region_init_ram_from_fd(MemoryRegion *mr,
1455 struct Object *owner,
1456 const char *name,
1457 uint64_t size,
1458 bool share,
1459 int fd,
1460 Error **errp)
1461{
1462 memory_region_init(mr, owner, name, size);
1463 mr->ram = true;
1464 mr->terminates = true;
1465 mr->destructor = memory_region_destructor_ram;
1466 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1467 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1468}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001469#endif
1470
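/*
 * Usage sketch (Linux only, matching the #ifdef above): backing guest RAM
 * with a file, e.g. hugepages or memory shared with an external process.
 * The path and size below are illustrative:
 *
 *     memory_region_init_ram_from_file(mr, OBJECT(dev), "pc.ram",
 *                                      ram_size, true,
 *                                      "/dev/hugepages/vm0", &error_fatal);
 *
 * memory_region_init_ram_from_fd() is the same idea but takes a file
 * descriptor the caller already opened (for instance one received over a
 * vhost-user socket).
 */
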
Avi Kivity093bc2c2011-07-26 14:26:01 +03001471void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001472 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001473 const char *name,
1474 uint64_t size,
1475 void *ptr)
1476{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001477 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001478 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001479 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001480 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001481 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001482
1483 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1484 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001485 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001486}
1487
Alex Williamson21e00fa2016-10-31 09:53:03 -06001488void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1489 Object *owner,
1490 const char *name,
1491 uint64_t size,
1492 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301493{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001494 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1495 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001496 mr->ops = &ram_device_mem_ops;
1497 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301498}
1499
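/*
 * Sketch: a VFIO-style caller would wrap a host mmap() of a device BAR so
 * that guest accesses reach the mapping through ram_device_mem_ops above,
 * preserving the exact access size the guest used.  Everything except the
 * API call is hypothetical:
 *
 *     void *bar = mmap(NULL, bar_size, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, device_fd, bar_offset);
 *     memory_region_init_ram_device_ptr(&vdev->bar_mr, OBJECT(vdev),
 *                                       "vfio-bar0", bar_size, bar);
 */
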
Avi Kivity093bc2c2011-07-26 14:26:01 +03001500void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001501 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001502 const char *name,
1503 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001504 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001505 uint64_t size)
1506{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001507 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001508 mr->alias = orig;
1509 mr->alias_offset = offset;
1510}
1511
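/*
 * Sketch: an alias re-exposes a slice of another region at a new address.
 * A classic use is mirroring part of system RAM into a legacy window
 * (names and addresses here are illustrative):
 *
 *     memory_region_init_alias(&isa_mirror, OBJECT(machine), "isa-mirror",
 *                              system_ram, 0xa0000, 0x20000);
 *     memory_region_add_subregion(isa_space, 0xa0000, &isa_mirror);
 */
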
Peter Maydellb59821a2017-07-07 15:42:50 +01001512void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1513 struct Object *owner,
1514 const char *name,
1515 uint64_t size,
1516 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001517{
1518 memory_region_init(mr, owner, name, size);
1519 mr->ram = true;
1520 mr->readonly = true;
1521 mr->terminates = true;
1522 mr->destructor = memory_region_destructor_ram;
1523 mr->ram_block = qemu_ram_alloc(size, mr, errp);
1524 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1525}
1526
Peter Maydellb59821a2017-07-07 15:42:50 +01001527void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1528 Object *owner,
1529 const MemoryRegionOps *ops,
1530 void *opaque,
1531 const char *name,
1532 uint64_t size,
1533 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001534{
Peter Maydell39e0b032016-07-04 13:06:35 +01001535 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001536 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001537 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001538 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001539 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001540 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001541 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001542 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001543}
1544
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001545void memory_region_init_iommu(void *_iommu_mr,
1546 size_t instance_size,
1547 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001548 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001549 const char *name,
1550 uint64_t size)
1551{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001552 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001553 struct MemoryRegion *mr;
1554
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001555 object_initialize(_iommu_mr, instance_size, mrtypename);
1556 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001557 memory_region_do_init(mr, owner, name, size);
1558 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001559 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001560 QLIST_INIT(&iommu_mr->iommu_notify);
1561 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001562}
1563
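/*
 * Sketch: an IOMMU implementation initializes its region through this
 * helper from its own instance_init/realize code.  TYPE_MY_IOMMU_MEMORY_REGION
 * and the state struct are hypothetical; the call shape is what matters:
 *
 *     memory_region_init_iommu(&s->iommu_mr, sizeof(s->iommu_mr),
 *                              TYPE_MY_IOMMU_MEMORY_REGION, OBJECT(s),
 *                              "my-iommu", UINT64_MAX);
 *
 * The mrtypename must name a subtype of TYPE_IOMMU_MEMORY_REGION whose
 * class provides at least a translate callback.
 */
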
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001564static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001565{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001566 MemoryRegion *mr = MEMORY_REGION(obj);
1567
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001568 assert(!mr->container);
1569
1570 /* We know the region is not visible in any address space (it
1571 * does not have a container and cannot be a root either because
1572 * it has no references), so we can blindly clear mr->enabled.
1573 * memory_region_set_enabled instead could trigger a transaction
1574 * and cause an infinite loop.
1575 */

1576 mr->enabled = false;
1577 memory_region_transaction_begin();
1578 while (!QTAILQ_EMPTY(&mr->subregions)) {
1579 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1580 memory_region_del_subregion(mr, subregion);
1581 }
1582 memory_region_transaction_commit();
1583
Avi Kivity545e92e2011-08-08 19:58:48 +03001584 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001585 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001586 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001587 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001588}
1589
Paolo Bonzini803c0812013-05-07 06:59:09 +02001590Object *memory_region_owner(MemoryRegion *mr)
1591{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001592 Object *obj = OBJECT(mr);
1593 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001594}
1595
Paolo Bonzini46637be2013-05-07 09:06:00 +02001596void memory_region_ref(MemoryRegion *mr)
1597{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001598 /* MMIO callbacks most likely will access data that belongs
1599 * to the owner, hence the need to ref/unref the owner whenever
1600 * the memory region is in use.
1601 *
1602 * The memory region is a child of its owner. As long as the
1603 * owner doesn't call unparent itself on the memory region,
1604 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001605 * Memory regions without an owner are supposed to never go away;
1606 * we do not ref/unref them because it slows down DMA noticeably.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001607 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001608 if (mr && mr->owner) {
1609 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001610 }
1611}
1612
1613void memory_region_unref(MemoryRegion *mr)
1614{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001615 if (mr && mr->owner) {
1616 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001617 }
1618}
1619
Avi Kivity093bc2c2011-07-26 14:26:01 +03001620uint64_t memory_region_size(MemoryRegion *mr)
1621{
Avi Kivity08dafab2011-10-16 13:19:17 +02001622 if (int128_eq(mr->size, int128_2_64())) {
1623 return UINT64_MAX;
1624 }
1625 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001626}
1627
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001628const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001629{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001630 if (!mr->name) {
1631 ((MemoryRegion *)mr)->name =
1632 object_get_canonical_path_component(OBJECT(mr));
1633 }
Peter Maydell302fa282014-08-19 20:05:46 +01001634 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001635}
1636
Alex Williamson21e00fa2016-10-31 09:53:03 -06001637bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301638{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001639 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301640}
1641
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001642uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001643{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001644 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001645 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001646 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1647 }
1648 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001649}
1650
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001651bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1652{
1653 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1654}
1655
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001656static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001657{
1658 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1659 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001660 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001661
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001662 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001663 flags |= iommu_notifier->notifier_flags;
1664 }
1665
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001666 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1667 imrc->notify_flag_changed(iommu_mr,
1668 iommu_mr->iommu_notify_flags,
1669 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001670 }
1671
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001672 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001673}
1674
Peter Xucdb30812016-09-23 13:02:26 +08001675void memory_region_register_iommu_notifier(MemoryRegion *mr,
1676 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001677{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001678 IOMMUMemoryRegion *iommu_mr;
1679
Jason Wangefcd38c2016-12-30 18:09:17 +08001680 if (mr->alias) {
1681 memory_region_register_iommu_notifier(mr->alias, n);
1682 return;
1683 }
1684
Peter Xucdb30812016-09-23 13:02:26 +08001685 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001686 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001687 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001688 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001689 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1690 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001691}
1692
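/*
 * Sketch: a typical caller (e.g. a VFIO container) fills an IOMMUNotifier
 * and registers it.  This assumes the iommu_notifier_init() helper and
 * the IOMMU_NOTIFIER_ALL flag from memory.h; the "giommu" names are
 * illustrative:
 *
 *     iommu_notifier_init(&giommu->n, my_iommu_map_notify,
 *                         IOMMU_NOTIFIER_ALL,
 *                         0, memory_region_size(mr) - 1);
 *     memory_region_register_iommu_notifier(mr, &giommu->n);
 *
 * The flags must not be IOMMU_NOTIFIER_NONE and start must be <= end,
 * per the asserts above.
 */
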
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001693uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001694{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001695 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1696
1697 if (imrc->get_min_page_size) {
1698 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001699 }
1700 return TARGET_PAGE_SIZE;
1701}
1702
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001703void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001704{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001705 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001706 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001707 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001708 IOMMUTLBEntry iotlb;
1709
Peter Xufaa362e2017-04-07 18:59:11 +08001710 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001711 if (imrc->replay) {
1712 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001713 return;
1714 }
1715
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001716 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001717
David Gibsona788f222015-09-30 12:13:55 +10001718 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001719 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001720 if (iotlb.perm != IOMMU_NONE) {
1721 n->notify(n, &iotlb);
1722 }
1723
1724 /* If (2^64 - MR size) < granularity, it's possible to get an
1725 * infinite loop here. This should catch such a wraparound. */
1726 if ((addr + granularity) < addr) {
1727 break;
1728 }
1729 }
1730}
1731
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001732void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001733{
1734 IOMMUNotifier *notifier;
1735
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001736 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1737 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001738 }
1739}
1740
Peter Xucdb30812016-09-23 13:02:26 +08001741void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1742 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001743{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001744 IOMMUMemoryRegion *iommu_mr;
1745
Jason Wangefcd38c2016-12-30 18:09:17 +08001746 if (mr->alias) {
1747 memory_region_unregister_iommu_notifier(mr->alias, n);
1748 return;
1749 }
Peter Xucdb30812016-09-23 13:02:26 +08001750 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001751 iommu_mr = IOMMU_MEMORY_REGION(mr);
1752 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001753}
1754
Peter Xubd2bfa42017-04-07 18:59:10 +08001755void memory_region_notify_one(IOMMUNotifier *notifier,
1756 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001757{
Peter Xucdb30812016-09-23 13:02:26 +08001758 IOMMUNotifierFlag request_flags;
1759
Peter Xubd2bfa42017-04-07 18:59:10 +08001760 /*
1761 * Skip the notification if it does not overlap with the
1762 * registered range.
1763 */
1764 if (notifier->start > entry->iova + entry->addr_mask + 1 ||
1765 notifier->end < entry->iova) {
1766 return;
1767 }
Peter Xucdb30812016-09-23 13:02:26 +08001768
Peter Xubd2bfa42017-04-07 18:59:10 +08001769 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001770 request_flags = IOMMU_NOTIFIER_MAP;
1771 } else {
1772 request_flags = IOMMU_NOTIFIER_UNMAP;
1773 }
1774
Peter Xubd2bfa42017-04-07 18:59:10 +08001775 if (notifier->notifier_flags & request_flags) {
1776 notifier->notify(notifier, entry);
1777 }
1778}
1779
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001780void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001781 IOMMUTLBEntry entry)
1782{
1783 IOMMUNotifier *iommu_notifier;
1784
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001785 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001786
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001787 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001788 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001789 }
David Gibson06866572013-05-14 19:13:56 +10001790}
1791
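/*
 * Sketch: how an IOMMU implementation would fan out a mapping change.
 * The field values are illustrative; IOMMUTLBEntry itself comes from
 * memory.h and address_space_memory is the usual translation target:
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(page_size - 1),
 *         .translated_addr = pa & ~(page_size - 1),
 *         .addr_mask = page_size - 1,
 *         .perm = IOMMU_RW,            // IOMMU_NONE for an unmap
 *     };
 *     memory_region_notify_iommu(iommu_mr, entry);
 *
 * memory_region_notify_one() above then filters each notifier by range
 * and by its MAP/UNMAP flags.
 */
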
Avi Kivity093bc2c2011-07-26 14:26:01 +03001792void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1793{
Avi Kivity5a583342011-07-26 14:26:02 +03001794 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001795 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001796
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001797 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001798 old_logging = mr->vga_logging_count;
1799 mr->vga_logging_count += log ? 1 : -1;
1800 if (!!old_logging == !!mr->vga_logging_count) {
1801 return;
1802 }
1803
Jan Kiszka59023ef2012-08-23 13:02:30 +02001804 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001805 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001806 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001807 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001808}
1809
Avi Kivitya8170e52012-10-23 12:30:10 +02001810bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1811 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001812{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001813 assert(mr->ram_block);
1814 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1815 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001816}
1817
Avi Kivitya8170e52012-10-23 12:30:10 +02001818void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1819 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001820{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001821 assert(mr->ram_block);
1822 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1823 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001824 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001825}
1826
Juan Quintela6c279db2012-10-17 20:24:28 +02001827bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1828 hwaddr size, unsigned client)
1829{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001830 assert(mr->ram_block);
1831 return cpu_physical_memory_test_and_clear_dirty(
1832 memory_region_get_ram_addr(mr) + addr, size, client);
Juan Quintela6c279db2012-10-17 20:24:28 +02001833}
1834
Gerd Hoffmann8deaf122017-04-21 11:16:25 +02001835DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1836 hwaddr addr,
1837 hwaddr size,
1838 unsigned client)
1839{
1840 assert(mr->ram_block);
1841 return cpu_physical_memory_snapshot_and_clear_dirty(
1842 memory_region_get_ram_addr(mr) + addr, size, client);
1843}
1844
1845bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1846 hwaddr addr, hwaddr size)
1847{
1848 assert(mr->ram_block);
1849 return cpu_physical_memory_snapshot_get_dirty(snap,
1850 memory_region_get_ram_addr(mr) + addr, size);
1851}
Juan Quintela6c279db2012-10-17 20:24:28 +02001852
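/*
 * Sketch of the intended pattern (as used by display code): take a
 * snapshot that atomically clears the client's dirty bits, then consult
 * it while redrawing.  This assumes the snapshot is released with
 * g_free(); the loop and names are illustrative:
 *
 *     DirtyBitmapSnapshot *snap =
 *         memory_region_snapshot_and_clear_dirty(vram_mr, 0, vram_size,
 *                                                DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram_mr, snap,
 *                                              y * stride, stride)) {
 *             redraw_scanline(y);
 *         }
 *     }
 *     g_free(snap);
 */
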
Avi Kivity093bc2c2011-07-26 14:26:01 +03001853void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1854{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001855 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001856 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001857 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001858 FlatRange *fr;
1859
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001860 /* If the same address space has multiple log_sync listeners, we
1861 * visit that address space's FlatView multiple times. But because
1862 * log_sync listeners are rare, this is still cheaper than walking
1863 * every address space, most of which have no log_sync listener.
1864 */
1865 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1866 if (!listener->log_sync) {
1867 continue;
1868 }
1869 as = listener->address_space;
1870 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001871 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity0d673e32012-10-02 15:28:50 +02001872 if (fr->mr == mr) {
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001873 MemoryRegionSection mrs = section_from_flat_range(fr, as);
1874 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02001875 }
Avi Kivity5a583342011-07-26 14:26:02 +03001876 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001877 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03001878 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001879}
1880
1881void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1882{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001883 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001884 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001885 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01001886 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001887 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001888 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001889}
1890
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001891void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001892{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001893 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001894 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001895 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01001896 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001897 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001898 }
1899}
1900
Avi Kivitya8170e52012-10-23 12:30:10 +02001901void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1902 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001903{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001904 assert(mr->ram_block);
1905 cpu_physical_memory_test_and_clear_dirty(
1906 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001907}
1908
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001909int memory_region_get_fd(MemoryRegion *mr)
1910{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001911 int fd;
1912
1913 rcu_read_lock();
1914 while (mr->alias) {
1915 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001916 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001917 fd = mr->ram_block->fd;
1918 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001919
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001920 return fd;
1921}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001922
Avi Kivity093bc2c2011-07-26 14:26:01 +03001923void *memory_region_get_ram_ptr(MemoryRegion *mr)
1924{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001925 void *ptr;
1926 uint64_t offset = 0;
1927
1928 rcu_read_lock();
1929 while (mr->alias) {
1930 offset += mr->alias_offset;
1931 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001932 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08001933 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001934 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001935 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001936
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001937 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001938}
1939
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001940MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
1941{
1942 RAMBlock *block;
1943
1944 block = qemu_ram_block_from_host(ptr, false, offset);
1945 if (!block) {
1946 return NULL;
1947 }
1948
1949 return block->mr;
1950}
1951
Fam Zheng7ebb2742016-03-01 14:18:20 +08001952ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
1953{
1954 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
1955}
1956
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001957void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
1958{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001959 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001960
Gongleifa53a0e2016-05-10 10:04:59 +08001961 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001962}
1963
Avi Kivity0d673e32012-10-02 15:28:50 +02001964static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001965{
Paolo Bonzini99e86342013-05-06 10:26:13 +02001966 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001967 FlatRange *fr;
1968 CoalescedMemoryRange *cmr;
1969 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02001970 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001971
Paolo Bonzini856d7242013-05-06 11:57:21 +02001972 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001973 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001974 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02001975 section = (MemoryRegionSection) {
Avi Kivityf6790af2012-10-02 20:13:51 +02001976 .address_space = as,
Avi Kivity95d29942012-10-02 18:21:54 +02001977 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001978 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02001979 };
1980
Paolo Bonzini9a546352016-09-22 16:23:06 +02001981 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02001982 int128_get64(fr->addr.start),
1983 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001984 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
1985 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02001986 int128_sub(fr->addr.start,
1987 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001988 if (!addrrange_intersects(tmp, fr->addr)) {
1989 continue;
1990 }
1991 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02001992 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02001993 int128_get64(tmp.start),
1994 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001995 }
1996 }
1997 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001998 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001999}
2000
Avi Kivity0d673e32012-10-02 15:28:50 +02002001static void memory_region_update_coalesced_range(MemoryRegion *mr)
2002{
2003 AddressSpace *as;
2004
2005 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2006 memory_region_update_coalesced_range_as(mr, as);
2007 }
2008}
2009
Avi Kivity093bc2c2011-07-26 14:26:01 +03002010void memory_region_set_coalescing(MemoryRegion *mr)
2011{
2012 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002013 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002014}
2015
2016void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002017 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002018 uint64_t size)
2019{
Anthony Liguori7267c092011-08-20 22:09:37 -05002020 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002021
Avi Kivity08dafab2011-10-16 13:19:17 +02002022 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002023 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2024 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02002025 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002026}
2027
2028void memory_region_clear_coalescing(MemoryRegion *mr)
2029{
2030 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002031 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002032
Jan Kiszkad4105152012-08-23 13:02:29 +02002033 qemu_flush_coalesced_mmio_buffer();
2034 mr->flush_coalesced_mmio = false;
2035
Avi Kivity093bc2c2011-07-26 14:26:01 +03002036 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2037 cmr = QTAILQ_FIRST(&mr->coalesced);
2038 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002039 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002040 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002041 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002042
2043 if (updated) {
2044 memory_region_update_coalesced_range(mr);
2045 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002046}
2047
Jan Kiszkad4105152012-08-23 13:02:29 +02002048void memory_region_set_flush_coalesced(MemoryRegion *mr)
2049{
2050 mr->flush_coalesced_mmio = true;
2051}
2052
2053void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2054{
2055 qemu_flush_coalesced_mmio_buffer();
2056 if (QTAILQ_EMPTY(&mr->coalesced)) {
2057 mr->flush_coalesced_mmio = false;
2058 }
2059}
2060
Jan Kiszka196ea132015-06-18 18:47:20 +02002061void memory_region_set_global_locking(MemoryRegion *mr)
2062{
2063 mr->global_locking = true;
2064}
2065
2066void memory_region_clear_global_locking(MemoryRegion *mr)
2067{
2068 mr->global_locking = false;
2069}
2070
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002071static bool userspace_eventfd_warning;
2072
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002073void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002074 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002075 unsigned size,
2076 bool match_data,
2077 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002078 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002079{
2080 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002081 .addr.start = int128_make64(addr),
2082 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002083 .match_data = match_data,
2084 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002085 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002086 };
2087 unsigned i;
2088
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002089 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2090 userspace_eventfd_warning))) {
2091 userspace_eventfd_warning = true;
2092 error_report("Using eventfd without MMIO binding in KVM. "
2093 "Suboptimal performance expected");
2094 }
2095
Jason Wangb8aecea2015-11-06 16:02:45 +08002096 if (size) {
2097 adjust_endianness(mr, &mrfd.data, size);
2098 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002099 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002100 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2101 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2102 break;
2103 }
2104 }
2105 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002106 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002107 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2108 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2109 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2110 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002111 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002112 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002113}
2114
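/*
 * Sketch: wiring a doorbell register to an EventNotifier, so that a guest
 * write of a given value to a given offset wakes a waiting thread without
 * a full MMIO dispatch (see the kvm_eventfds_enabled() fallback in
 * memory_region_dispatch_write() above).  DOORBELL_* are hypothetical:
 *
 *     event_notifier_init(&s->doorbell, 0);
 *     memory_region_add_eventfd(&s->iomem, DOORBELL_OFFSET, 4,
 *                               true, DOORBELL_VALUE, &s->doorbell);
 */
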
2115void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002116 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002117 unsigned size,
2118 bool match_data,
2119 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002120 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002121{
2122 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002123 .addr.start = int128_make64(addr),
2124 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002125 .match_data = match_data,
2126 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002127 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002128 };
2129 unsigned i;
2130
Jason Wangb8aecea2015-11-06 16:02:45 +08002131 if (size) {
2132 adjust_endianness(mr, &mrfd.data, size);
2133 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002134 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002135 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2136 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2137 break;
2138 }
2139 }
2140 assert(i != mr->ioeventfd_nb);
2141 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2142 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2143 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002144 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002145 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002146 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002147 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002148}
2149
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002150static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002151{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002152 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002153 MemoryRegion *other;
2154
Jan Kiszka59023ef2012-08-23 13:02:30 +02002155 memory_region_transaction_begin();
2156
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002157 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002158 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002159 if (subregion->priority >= other->priority) {
2160 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2161 goto done;
2162 }
2163 }
2164 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2165done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002166 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002167 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002168}
2169
Peter Crosthwaite05987012014-06-05 23:14:44 -07002170static void memory_region_add_subregion_common(MemoryRegion *mr,
2171 hwaddr offset,
2172 MemoryRegion *subregion)
2173{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002174 assert(!subregion->container);
2175 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002176 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002177 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002178}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002179
2180void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002181 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002182 MemoryRegion *subregion)
2183{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002184 subregion->priority = 0;
2185 memory_region_add_subregion_common(mr, offset, subregion);
2186}
2187
2188void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002189 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002190 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002191 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002192{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002193 subregion->priority = priority;
2194 memory_region_add_subregion_common(mr, offset, subregion);
2195}
2196
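/*
 * Sketch: overlap priorities decide who wins when subregions collide.
 * Here a hypothetical "shadow_rom" region hides RAM in the same window,
 * because flattening favors the higher priority:
 *
 *     memory_region_add_subregion(sysmem, 0, ram);             // prio 0
 *     memory_region_add_subregion_overlap(sysmem, 0xf0000,
 *                                         shadow_rom, 1);      // wins
 */
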
2197void memory_region_del_subregion(MemoryRegion *mr,
2198 MemoryRegion *subregion)
2199{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002200 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002201 assert(subregion->container == mr);
2202 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002203 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002204 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002205 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002206 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002207}
2208
2209void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2210{
2211 if (enabled == mr->enabled) {
2212 return;
2213 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002214 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002215 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002216 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002217 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002218}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002219
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002220void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2221{
2222 Int128 s = int128_make64(size);
2223
2224 if (size == UINT64_MAX) {
2225 s = int128_2_64();
2226 }
2227 if (int128_eq(s, mr->size)) {
2228 return;
2229 }
2230 memory_region_transaction_begin();
2231 mr->size = s;
2232 memory_region_update_pending = true;
2233 memory_region_transaction_commit();
2234}
2235
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002236static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002237{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002238 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002239
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002240 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002241 memory_region_transaction_begin();
2242 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002243 memory_region_del_subregion(container, mr);
2244 mr->container = container;
2245 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002246 memory_region_unref(mr);
2247 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002248 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002249}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002250
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002251void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2252{
2253 if (addr != mr->addr) {
2254 mr->addr = addr;
2255 memory_region_readd_subregion(mr);
2256 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002257}
2258
Avi Kivitya8170e52012-10-23 12:30:10 +02002259void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002260{
Avi Kivity47033592011-12-04 19:16:50 +02002261 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002262
Jan Kiszka59023ef2012-08-23 13:02:30 +02002263 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002264 return;
2265 }
2266
Jan Kiszka59023ef2012-08-23 13:02:30 +02002267 memory_region_transaction_begin();
2268 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002269 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002270 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002271}
2272
Igor Mammedova2b257d2014-10-31 16:38:37 +00002273uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2274{
2275 return mr->align;
2276}
2277
Avi Kivitye2177952011-12-08 15:00:18 +02002278static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2279{
2280 const AddrRange *addr = addr_;
2281 const FlatRange *fr = fr_;
2282
2283 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2284 return -1;
2285 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2286 return 1;
2287 }
2288 return 0;
2289}
2290
Paolo Bonzini99e86342013-05-06 10:26:13 +02002291static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002292{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002293 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002294 sizeof(FlatRange), cmp_flatrange_addr);
2295}
2296
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002297bool memory_region_is_mapped(MemoryRegion *mr)
2298{
2299 return mr->container ? true : false;
2300}
2301
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002302/* Same as memory_region_find, but it does not add a reference to the
2303 * returned region. It must be called from an RCU critical section.
2304 */
2305static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2306 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002307{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002308 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002309 MemoryRegion *root;
2310 AddressSpace *as;
2311 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002312 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002313 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002314
Paolo Bonzini73034e92013-05-07 15:48:28 +02002315 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002316 for (root = mr; root->container; ) {
2317 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002318 addr += root->addr;
2319 }
2320
2321 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002322 if (!as) {
2323 return ret;
2324 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002325 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002326
Paolo Bonzini2b647662013-05-17 12:40:44 +02002327 view = atomic_rcu_read(&as->current_map);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002328 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002329 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002330 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002331 }
2332
Paolo Bonzini99e86342013-05-06 10:26:13 +02002333 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002334 --fr;
2335 }
2336
2337 ret.mr = fr->mr;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002338 ret.address_space = as;
Avi Kivitye2177952011-12-08 15:00:18 +02002339 range = addrrange_intersection(range, fr->addr);
2340 ret.offset_within_region = fr->offset_in_region;
2341 ret.offset_within_region += int128_get64(int128_sub(range.start,
2342 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002343 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002344 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002345 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002346 return ret;
2347}
2348
2349MemoryRegionSection memory_region_find(MemoryRegion *mr,
2350 hwaddr addr, uint64_t size)
2351{
2352 MemoryRegionSection ret;
2353 rcu_read_lock();
2354 ret = memory_region_find_rcu(mr, addr, size);
2355 if (ret.mr) {
2356 memory_region_ref(ret.mr);
2357 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002358 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002359 return ret;
2360}
2361
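/*
 * Sketch: memory_region_find() returns a referenced section, so callers
 * must drop the reference when done.  Illustrative use:
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, int128_get64(sec.size) ...
 *         memory_region_unref(sec.mr);
 *     }
 */
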
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002362bool memory_region_present(MemoryRegion *container, hwaddr addr)
2363{
2364 MemoryRegion *mr;
2365
2366 rcu_read_lock();
2367 mr = memory_region_find_rcu(container, addr, 1).mr;
2368 rcu_read_unlock();
2369 return mr && mr != container;
2370}
2371
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002372void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002373{
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002374 MemoryListener *listener;
2375 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002376 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002377 FlatRange *fr;
2378
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002379 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2380 if (!listener->log_sync) {
2381 continue;
2382 }
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002383 as = listener->address_space;
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002384 view = address_space_get_flatview(as);
2385 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonziniadaad612016-09-22 16:09:08 +02002386 if (fr->dirty_log_mask) {
2387 MemoryRegionSection mrs = section_from_flat_range(fr, as);
2388 listener->log_sync(listener, &mrs);
2389 }
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002390 }
2391 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002392 }
2393}
2394
Jay Zhou19310762017-07-28 18:28:53 +08002395static VMChangeStateEntry *vmstate_change;
2396
Avi Kivity7664e802011-12-11 14:47:25 +02002397void memory_global_dirty_log_start(void)
2398{
Jay Zhou19310762017-07-28 18:28:53 +08002399 if (vmstate_change) {
2400 qemu_del_vm_change_state_handler(vmstate_change);
2401 vmstate_change = NULL;
2402 }
2403
Avi Kivity7664e802011-12-11 14:47:25 +02002404 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002405
Avi Kivity7376e582012-02-08 21:05:17 +02002406 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002407
2408 /* Refresh DIRTY_LOG_MIGRATION bit. */
2409 memory_region_transaction_begin();
2410 memory_region_update_pending = true;
2411 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002412}
2413
Jay Zhou19310762017-07-28 18:28:53 +08002414static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002415{
Avi Kivity7664e802011-12-11 14:47:25 +02002416 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002417
2418 /* Refresh DIRTY_LOG_MIGRATION bit. */
2419 memory_region_transaction_begin();
2420 memory_region_update_pending = true;
2421 memory_region_transaction_commit();
2422
Avi Kivity7376e582012-02-08 21:05:17 +02002423 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002424}
2425
Jay Zhou19310762017-07-28 18:28:53 +08002426static void memory_vm_change_state_handler(void *opaque, int running,
2427 RunState state)
2428{
2429 if (running) {
2430 memory_global_dirty_log_do_stop();
2431
2432 if (vmstate_change) {
2433 qemu_del_vm_change_state_handler(vmstate_change);
2434 vmstate_change = NULL;
2435 }
2436 }
2437}
2438
2439void memory_global_dirty_log_stop(void)
2440{
2441 if (!runstate_is_running()) {
2442 if (vmstate_change) {
2443 return;
2444 }
2445 vmstate_change = qemu_add_vm_change_state_handler(
2446 memory_vm_change_state_handler, NULL);
2447 return;
2448 }
2449
2450 memory_global_dirty_log_do_stop();
2451}
2452
Avi Kivity7664e802011-12-11 14:47:25 +02002453static void listener_add_address_space(MemoryListener *listener,
2454 AddressSpace *as)
2455{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002456 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002457 FlatRange *fr;
2458
Paolo Bonzini680a4782015-11-02 09:23:52 +01002459 if (listener->begin) {
2460 listener->begin(listener);
2461 }
Avi Kivity7664e802011-12-11 14:47:25 +02002462 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002463 if (listener->log_global_start) {
2464 listener->log_global_start(listener);
2465 }
Avi Kivity7664e802011-12-11 14:47:25 +02002466 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002467
Paolo Bonzini856d7242013-05-06 11:57:21 +02002468 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002469 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity7664e802011-12-11 14:47:25 +02002470 MemoryRegionSection section = {
2471 .mr = fr->mr,
Avi Kivityf6790af2012-10-02 20:13:51 +02002472 .address_space = as,
Avi Kivity7664e802011-12-11 14:47:25 +02002473 .offset_within_region = fr->offset_in_region,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002474 .size = fr->addr.size,
Avi Kivity7664e802011-12-11 14:47:25 +02002475 .offset_within_address_space = int128_get64(fr->addr.start),
Avi Kivity7a8499e2012-02-08 17:01:23 +02002476 .readonly = fr->readonly,
Avi Kivity7664e802011-12-11 14:47:25 +02002477 };
Paolo Bonzini680a4782015-11-02 09:23:52 +01002478 if (fr->dirty_log_mask && listener->log_start) {
2479 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2480 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002481 if (listener->region_add) {
2482 listener->region_add(listener, &section);
2483 }
Avi Kivity7664e802011-12-11 14:47:25 +02002484 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002485 if (listener->commit) {
2486 listener->commit(listener);
2487 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002488 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002489}
2490
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002491void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002492{
Avi Kivity72e22d22012-02-08 15:05:50 +02002493 MemoryListener *other = NULL;
2494
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002495 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002496 if (QTAILQ_EMPTY(&memory_listeners)
2497 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2498 memory_listeners)->priority) {
2499 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2500 } else {
2501 QTAILQ_FOREACH(other, &memory_listeners, link) {
2502 if (listener->priority < other->priority) {
2503 break;
2504 }
2505 }
2506 QTAILQ_INSERT_BEFORE(other, listener, link);
2507 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002508
Paolo Bonzini9a546352016-09-22 16:23:06 +02002509 if (QTAILQ_EMPTY(&as->listeners)
2510 || listener->priority >= QTAILQ_LAST(&as->listeners,
2511 memory_listeners)->priority) {
2512 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2513 } else {
2514 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2515 if (listener->priority < other->priority) {
2516 break;
2517 }
2518 }
2519 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2520 }
2521
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002522 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002523}
2524
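/*
 * Sketch: a minimal listener observing one address space.  All callbacks
 * are optional (the dispatch code above NULL-checks each one); names are
 * illustrative:
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         ... e.g. program an IOMMU or record the mapping ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */
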
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

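/*
 * Example (hedged sketch): a device that can expose a direct host pointer
 * for part of its MMIO window supplies a request_ptr callback in its
 * MemoryRegionOps; the callback returns the host pointer and reports the
 * guest range it covers through size/offset. MyDevState, mydev_* and
 * MYDEV_WINDOW_SIZE below are hypothetical:
 *
 *     static void *mydev_request_ptr(void *opaque, hwaddr addr,
 *                                    unsigned *size, unsigned *offset)
 *     {
 *         MyDevState *s = opaque;
 *
 *         *size = MYDEV_WINDOW_SIZE;
 *         *offset = 0;
 *         return s->window;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .request_ptr = mydev_request_ptr,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 * The device must call memory_region_invalidate_mmio_ptr() as soon as the
 * returned pointer stops being valid.
 */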
bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Begin a transaction so that no intermediate update happens if the
     * request_ptr callback itself calls memory_region_invalidate_mmio_ptr(),
     * which is likely when a cache is in use.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}

typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset the dirty state for this range so the invalidation
     * doesn't trigger again later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find() took a reference on section.mr. */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the mmio_interface; unrealize and drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

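/*
 * Example (sketch): a device that handed out a pointer through its
 * request_ptr callback calls this when the backing storage changes or
 * disappears; s->mmio and MYDEV_WINDOW_SIZE are hypothetical:
 *
 *     memory_region_invalidate_mmio_ptr(&s->mmio, 0, MYDEV_WINDOW_SIZE);
 *
 * The actual teardown runs later via async_safe_run_on_cpu(), safely
 * outside the MMIO dispatch path that may have triggered it.
 */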
void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}

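/*
 * Example (illustrative): a device that performs DMA through its own view
 * of memory typically wraps a root container region in an AddressSpace;
 * s->dma_root and s->dma_as are hypothetical fields:
 *
 *     memory_region_init(&s->dma_root, OBJECT(s), "mydev-dma", UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "mydev-dma");
 *
 * The address space takes a reference on the root region and renders its
 * first FlatView inside the transaction below.
 */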
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = flatview_new();
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}

static void do_address_space_destroy(AddressSpace *as)
{
    bool do_free = as->malloced;

    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}

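/*
 * Example (sketch): callers that share a root region, such as CPUs on the
 * same bus, can share one AddressSpace instead of building one each:
 *
 *     AddressSpace *as = address_space_init_shareable(get_system_memory(),
 *                                                     "cpu-memory");
 *
 * A second call with the same root simply bumps ref_count on the existing
 * malloced instance and returns it.
 */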
AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use. Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                       int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of the memory region's address range. This
     * should never happen in practice; if it does, print a marker to
     * warn whoever is reading the dump.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

static void mtree_print_flatview(fprintf_function p, void *f,
                                 AddressSpace *as)
{
    FlatView *view = address_space_get_flatview(as);
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView for "
          "address space '%s'\n", as->name);
        flatview_unref(view);
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

    flatview_unref(view);
}

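/*
 * Backend of the HMP "info mtree" monitor command; flatview=true
 * corresponds to "info mtree -f". Abridged, illustrative output of the
 * hierarchical form (actual names and ranges depend on the machine):
 *
 *     address-space: memory
 *       0000000000000000-ffffffffffffffff (prio 0, i/o): system
 *         0000000000000000-0000000007ffffff (prio 0, ram): pc.ram
 */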
void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            mon_printf(f, "address-space (flat view): %s\n", as->name);
            mtree_print_flatview(mon_printf, f, as);
            mon_printf(f, "\n");
        }
        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

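/*
 * Example (hedged sketch): a device normally passes itself as the owner so
 * the RAM block gets a migration-unique name; "mydev.ram", MYDEV_RAM_SIZE
 * and MYDEV_RAM_BASE are hypothetical:
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            MYDEV_RAM_SIZE, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), MYDEV_RAM_BASE,
 *                                 &s->ram);
 *
 * Unlike the _nomigrate variants, the wrappers below also register the
 * RAM with vmstate so its contents are migrated.
 */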
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

static const TypeInfo memory_region_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_MEMORY_REGION,
    .instance_size = sizeof(MemoryRegion),
    .instance_init = memory_region_initfn,
    .instance_finalize = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent = TYPE_MEMORY_REGION,
    .name = TYPE_IOMMU_MEMORY_REGION,
    .class_size = sizeof(IOMMUMemoryRegionClass),
    .instance_size = sizeof(IOMMUMemoryRegion),
    .instance_init = iommu_memory_region_initfn,
    .abstract = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)