/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

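/*
 * Worked example (editor's illustration, not part of the original code):
 * with r1 = { start 0x1000, size 0x3000 } and r2 = { start 0x2000, size
 * 0x4000 }, addrrange_end(r1) is 0x4000, addrrange_intersects(r1, r2) is
 * true, and addrrange_intersection(r1, r2) is { start 0x2000, size 0x2000 }
 * (max of the starts, min of the ends).  The arithmetic uses the Int128
 * helpers so that a range ending exactly at 2^64 does not overflow.
 */
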
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
    do { \
        MemoryListener *_listener; \
        \
        switch (_direction) { \
        case Forward: \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, ##_args); \
                } \
            } \
            break; \
        case Reverse: \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, \
                                   memory_listeners, link) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, ##_args); \
                } \
            } \
            break; \
        default: \
            abort(); \
        } \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do { \
        MemoryListener *_listener; \
        struct memory_listeners_as *list = &(_as)->listeners; \
        \
        switch (_direction) { \
        case Forward: \
            QTAILQ_FOREACH(_listener, list, link_as) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, _section, ##_args); \
                } \
            } \
            break; \
        case Reverse: \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, _section, ##_args); \
                } \
            } \
            break; \
        default: \
            abort(); \
        } \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
    do { \
        MemoryRegionSection mrs = section_from_flat_range(fr, \
                address_space_to_flatview(as)); \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

#define FOR_EACH_FLAT_RANGE(var, view) \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position. Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

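/*
 * Illustration (added by the editor): two FlatRanges are merged only when
 * can_merge() holds, i.e. they are contiguous in guest physical address
 * space, back the same MemoryRegion at contiguous offsets, and share the
 * same dirty_log_mask/romd_mode/readonly attributes.  For example, the
 * ranges [0x0, 0x1000) at offset 0x0 and [0x1000, 0x2000) at offset 0x1000
 * of one RAM region collapse into a single [0x0, 0x2000) range.
 */
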
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

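/*
 * Example (added for clarity): on a big-endian target, a 4-byte value read
 * from a region whose ops declare DEVICE_LITTLE_ENDIAN is byte-swapped
 * here, e.g. 0x12345678 becomes 0x78563412; DEVICE_NATIVE_ENDIAN data is
 * passed through unchanged.
 */
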
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

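/*
 * Example (editor's sketch of the splitting logic above): an 8-byte access
 * with access_size_max == 4 is broken into two 4-byte calls to access_fn
 * with access_mask == 0xffffffff.  On a little-endian region the first call
 * uses shift 0 and the second shift 32; on a big-endian region the shifts
 * are reversed, so the partial results land in the correct half of *value
 * either way.
 */
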
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view. Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

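/*
 * Illustration (not from the original source): rendering a root container
 * that holds RAM at [0x0, 0x4000) with priority 0 and an MMIO subregion at
 * [0x1000, 0x2000) with priority 1 produces three FlatRanges: RAM
 * [0x0, 0x1000), MMIO [0x1000, 0x2000) and RAM [0x2000, 0x4000).  The
 * higher-priority subregion is rendered first, and the lower-priority one
 * only fills the gaps it left.
 */
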
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety. Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety. If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

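/*
 * Typical usage (illustrative only): callers that walk a flat view outside
 * an RCU read-side critical section take a reference and drop it when done:
 *
 *     FlatView *view = address_space_get_flatview(as);
 *     FOR_EACH_FLAT_RANGE(fr, view) {
 *         ...
 *     }
 *     flatview_unref(view);
 *
 * address_space_update_ioeventfds() below follows exactly this pattern.
 */
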
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

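/*
 * Editor's example of the two passes driven by address_space_set_flatview():
 * with old view {A, B} and new view {B, C}, the adding == false pass emits
 * region_del(A) only, and the adding == true pass emits region_nop(B)
 * (plus log_start/log_stop if B's dirty_log_mask changed) and region_add(C).
 * Deletions therefore always precede additions for a given update.
 */
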
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

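/*
 * Editor's note on sharing: flat_views is keyed by the MemoryRegion returned
 * from memory_region_get_flatview_root(), so two address spaces whose roots
 * resolve to the same region (for example, an alias covering the whole of
 * system memory) end up pointing at one shared FlatView instead of each
 * rendering its own copy.
 */
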
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point. This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

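/*
 * Illustrative usage (hypothetical device code, not part of this file):
 * batching several layout changes under one transaction makes the listeners
 * and flat views update only once, at commit time:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(old_bar, false);
 *     memory_region_add_subregion(system_memory, 0x10000000, new_bar);
 *     memory_region_transaction_commit();
 *
 * The depth counter allows such transactions to nest.
 */
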
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
       return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

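/*
 * Example (editor's note): the characters '/', '[', '\' and ']' are
 * meaningful in QOM paths, so a name such as "pci/bar[0]" is stored as
 * "pci\x2fbar\x5b0\x5d"; names without those characters are returned
 * unchanged (as a plain copy).
 */
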
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
Peter Maydell8372d382018-05-31 14:50:52 +01001375 is_write, attrs)) {
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001376 return false;
1377 }
1378 }
1379
Avi Kivity093bc2c2011-07-26 14:26:01 +03001380 return true;
1381}
1382
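/*
 * Editor's note: an illustrative sketch, not part of the original file, of
 * the .valid settings that memory_region_access_valid() consumes.  All
 * example_probe_* names are hypothetical.
 */
static uint64_t example_probe_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;
}

static void example_probe_write(void *opaque, hwaddr addr,
                                uint64_t data, unsigned size)
{
}

static bool example_probe_accepts(void *opaque, hwaddr addr, unsigned size,
                                  bool is_write, MemTxAttrs attrs)
{
    /* Refuse writes to the top word of a hypothetical 0x100-byte bank. */
    return !(is_write && addr >= 0xfc);
}

static const MemoryRegionOps example_probe_ops = {
    .read = example_probe_read,
    .write = example_probe_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        /*
         * memory_region_access_valid() clamps the probed size into
         * [min, max] before calling .accepts on each chunk; only the
         * alignment check and .accepts can actually reject an access.
         */
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
        .accepts = example_probe_accepts,
    },
};
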
Peter Maydellcc05c432015-04-26 16:49:23 +01001383static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1384 hwaddr addr,
1385 uint64_t *pval,
1386 unsigned size,
1387 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001388{
Peter Maydellcc05c432015-04-26 16:49:23 +01001389 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001390
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001391 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001392 return access_with_adjusted_size(addr, pval, size,
1393 mr->ops->impl.min_access_size,
1394 mr->ops->impl.max_access_size,
1395 memory_region_read_accessor,
1396 mr, attrs);
1397 } else if (mr->ops->read_with_attrs) {
1398 return access_with_adjusted_size(addr, pval, size,
1399 mr->ops->impl.min_access_size,
1400 mr->ops->impl.max_access_size,
1401 memory_region_read_with_attrs_accessor,
1402 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001403 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001404 return access_with_adjusted_size(addr, pval, size, 1, 4,
1405 memory_region_oldmmio_read_accessor,
1406 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001407 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001408}
1409
Peter Maydell3b643492015-04-26 16:49:23 +01001410MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1411 hwaddr addr,
1412 uint64_t *pval,
1413 unsigned size,
1414 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001415{
Peter Maydellcc05c432015-04-26 16:49:23 +01001416 MemTxResult r;
1417
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001418 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001419 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001420 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001421 }
Avi Kivitya621f382012-01-02 13:12:08 +02001422
Peter Maydellcc05c432015-04-26 16:49:23 +01001423 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001424 adjust_endianness(mr, pval, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001425 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001426}
1427
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001428/* Return true if an eventfd was signalled */
1429static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1430 hwaddr addr,
1431 uint64_t data,
1432 unsigned size,
1433 MemTxAttrs attrs)
1434{
1435 MemoryRegionIoeventfd ioeventfd = {
1436 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1437 .data = data,
1438 };
1439 unsigned i;
1440
1441 for (i = 0; i < mr->ioeventfd_nb; i++) {
1442 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1443 ioeventfd.e = mr->ioeventfds[i].e;
1444
Tristan Burgess73bb7532018-05-28 23:04:45 -04001445 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001446 event_notifier_set(ioeventfd.e);
1447 return true;
1448 }
1449 }
1450
1451 return false;
1452}
1453
Peter Maydell3b643492015-04-26 16:49:23 +01001454MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1455 hwaddr addr,
1456 uint64_t data,
1457 unsigned size,
1458 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001459{
Peter Maydell6d7b9a62018-05-31 14:50:52 +01001460 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001461 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001462 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001463 }
1464
Avi Kivitya621f382012-01-02 13:12:08 +02001465 adjust_endianness(mr, &data, size);
1466
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001467 if ((!kvm_eventfds_enabled()) &&
1468 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1469 return MEMTX_OK;
1470 }
1471
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001472 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001473 return access_with_adjusted_size(addr, &data, size,
1474 mr->ops->impl.min_access_size,
1475 mr->ops->impl.max_access_size,
1476 memory_region_write_accessor, mr,
1477 attrs);
1478 } else if (mr->ops->write_with_attrs) {
1479 return
1480 access_with_adjusted_size(addr, &data, size,
1481 mr->ops->impl.min_access_size,
1482 mr->ops->impl.max_access_size,
1483 memory_region_write_with_attrs_accessor,
1484 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001485 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001486 return access_with_adjusted_size(addr, &data, size, 1, 4,
1487 memory_region_oldmmio_write_accessor,
1488 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001489 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001490}
1491
Avi Kivity093bc2c2011-07-26 14:26:01 +03001492void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001493 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001494 const MemoryRegionOps *ops,
1495 void *opaque,
1496 const char *name,
1497 uint64_t size)
1498{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001499 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001500 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001501 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001502 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001503}
1504
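/*
 * Editor's note: a minimal usage sketch for memory_region_init_io(), not
 * part of the original file.  "ExampleDev" and its callbacks are
 * hypothetical; a real device would do this from its instance_init or
 * realize method.
 */
typedef struct ExampleDev {
    MemoryRegion mmio;
    uint32_t reg;
} ExampleDev;

static uint64_t example_dev_read(void *opaque, hwaddr addr, unsigned size)
{
    ExampleDev *d = opaque;

    return addr == 0 ? d->reg : 0;
}

static void example_dev_write(void *opaque, hwaddr addr,
                              uint64_t data, unsigned size)
{
    ExampleDev *d = opaque;

    if (addr == 0) {
        d->reg = data;
    }
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
};

static void example_dev_init_mmio(ExampleDev *d, Object *owner)
{
    /* 0x1000 bytes of MMIO dispatched through the callbacks above. */
    memory_region_init_io(&d->mmio, owner, &example_dev_ops, d,
                          "example-dev-mmio", 0x1000);
}
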
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001505void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1506 Object *owner,
1507 const char *name,
1508 uint64_t size,
1509 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001510{
Marcel Apfelbaum06329cc2017-12-13 16:37:37 +02001511 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1512}
1513
1514void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1515 Object *owner,
1516 const char *name,
1517 uint64_t size,
1518 bool share,
1519 Error **errp)
1520{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001521 Error *err = NULL;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001522 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001523 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001524 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001525 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001526 mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001527 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001528 if (err) {
1529 mr->size = int128_zero();
1530 object_unparent(OBJECT(mr));
1531 error_propagate(errp, err);
1532 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001533}
1534
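/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * Callers that want the contents migrated normally go through the
 * memory_region_init_ram()/vmstate_register_ram() wrappers instead of the
 * _nomigrate variant; the names below are hypothetical.
 */
static void example_alloc_scratch_ram(MemoryRegion *mr, Object *owner)
{
    /* 4 MiB of host-allocated RAM that is deliberately not migrated. */
    memory_region_init_ram_nomigrate(mr, owner, "example-scratch",
                                     4 * 1024 * 1024, &error_fatal);
}
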
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001535void memory_region_init_resizeable_ram(MemoryRegion *mr,
1536 Object *owner,
1537 const char *name,
1538 uint64_t size,
1539 uint64_t max_size,
1540 void (*resized)(const char*,
1541 uint64_t length,
1542 void *host),
1543 Error **errp)
1544{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001545 Error *err = NULL;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001546 memory_region_init(mr, owner, name, size);
1547 mr->ram = true;
1548 mr->terminates = true;
1549 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001550 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001551 mr, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001552 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001553 if (err) {
1554 mr->size = int128_zero();
1555 object_unparent(OBJECT(mr));
1556 error_propagate(errp, err);
1557 }
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001558}
1559
Hikaru Nishidad5dbde42018-09-24 21:32:05 +09001560#ifdef CONFIG_POSIX
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001561void memory_region_init_ram_from_file(MemoryRegion *mr,
1562 struct Object *owner,
1563 const char *name,
1564 uint64_t size,
Haozhong Zhang98376842017-12-11 15:28:04 +08001565 uint64_t align,
Junyan Hecbfc0172018-07-18 15:47:58 +08001566 uint32_t ram_flags,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001567 const char *path,
1568 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001569{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001570 Error *err = NULL;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001571 memory_region_init(mr, owner, name, size);
1572 mr->ram = true;
1573 mr->terminates = true;
1574 mr->destructor = memory_region_destructor_ram;
Haozhong Zhang98376842017-12-11 15:28:04 +08001575 mr->align = align;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001576 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001577 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001578 if (err) {
1579 mr->size = int128_zero();
1580 object_unparent(OBJECT(mr));
1581 error_propagate(errp, err);
1582 }
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001583}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001584
1585void memory_region_init_ram_from_fd(MemoryRegion *mr,
1586 struct Object *owner,
1587 const char *name,
1588 uint64_t size,
1589 bool share,
1590 int fd,
1591 Error **errp)
1592{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001593 Error *err = NULL;
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001594 memory_region_init(mr, owner, name, size);
1595 mr->ram = true;
1596 mr->terminates = true;
1597 mr->destructor = memory_region_destructor_ram;
Junyan Hecbfc0172018-07-18 15:47:58 +08001598 mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
1599 share ? RAM_SHARED : 0,
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001600 fd, &err);
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001601 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001602 if (err) {
1603 mr->size = int128_zero();
1604 object_unparent(OBJECT(mr));
1605 error_propagate(errp, err);
1606 }
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001607}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001608#endif
1609
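/*
 * Editor's note: a POSIX-only usage sketch for the file-backed variants
 * above, not part of the original file.  The mount point and names are
 * hypothetical.
 */
#ifdef CONFIG_POSIX
static void example_init_hugepage_ram(MemoryRegion *mr, Object *owner,
                                      Error **errp)
{
    /* 1 GiB of shared RAM backed by a file on a hugetlbfs mount. */
    memory_region_init_ram_from_file(mr, owner, "example-hugepage-ram",
                                     1ULL * 1024 * 1024 * 1024,
                                     0 /* align: use the default */,
                                     RAM_SHARED, "/dev/hugepages/example",
                                     errp);
}
#endif
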
Avi Kivity093bc2c2011-07-26 14:26:01 +03001610void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001611 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001612 const char *name,
1613 uint64_t size,
1614 void *ptr)
1615{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001616 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001617 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001618 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001619 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001620 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001621
1622 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1623 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001624 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001625}
1626
Alex Williamson21e00fa2016-10-31 09:53:03 -06001627void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1628 Object *owner,
1629 const char *name,
1630 uint64_t size,
1631 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301632{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001633 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1634 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001635 mr->ops = &ram_device_mem_ops;
1636 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301637}
1638
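/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * memory_region_init_ram_device_ptr() is typically used for host memory
 * obtained from a device, e.g. a VFIO mmap of a PCI BAR; the names are
 * hypothetical.
 */
static void example_map_device_bar(MemoryRegion *mr, Object *owner,
                                   void *mmap_ptr, uint64_t bar_size)
{
    /*
     * Accesses that cannot use the fast RAM path are dispatched through
     * ram_device_mem_ops above rather than treated as ordinary RAM.
     */
    memory_region_init_ram_device_ptr(mr, owner, "example-vfio-bar",
                                      bar_size, mmap_ptr);
}
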
Avi Kivity093bc2c2011-07-26 14:26:01 +03001639void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001640 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001641 const char *name,
1642 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001643 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001644 uint64_t size)
1645{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001646 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001647 mr->alias = orig;
1648 mr->alias_offset = offset;
1649}
1650
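/*
 * Editor's note: an illustrative sketch of aliasing, not part of the
 * original file: the low 1 MiB of a RAM region is exposed a second time at
 * a different guest address.  The names and addresses are hypothetical.
 */
static void example_alias_low_ram(MemoryRegion *alias, Object *owner,
                                  MemoryRegion *ram)
{
    memory_region_init_alias(alias, owner, "example-lowmem-alias",
                             ram, 0 /* offset into @ram */, 0x100000);
    /* The alias only becomes visible once it is mapped into a container. */
    memory_region_add_subregion_overlap(get_system_memory(), 0xe0000000,
                                        alias, 1);
}
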
Peter Maydellb59821a2017-07-07 15:42:50 +01001651void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1652 struct Object *owner,
1653 const char *name,
1654 uint64_t size,
1655 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001656{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001657 Error *err = NULL;
Peter Maydella1777f72016-07-04 13:06:35 +01001658 memory_region_init(mr, owner, name, size);
1659 mr->ram = true;
1660 mr->readonly = true;
1661 mr->terminates = true;
1662 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001663 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
Peter Maydella1777f72016-07-04 13:06:35 +01001664 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001665 if (err) {
1666 mr->size = int128_zero();
1667 object_unparent(OBJECT(mr));
1668 error_propagate(errp, err);
1669 }
Peter Maydella1777f72016-07-04 13:06:35 +01001670}
1671
Peter Maydellb59821a2017-07-07 15:42:50 +01001672void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1673 Object *owner,
1674 const MemoryRegionOps *ops,
1675 void *opaque,
1676 const char *name,
1677 uint64_t size,
1678 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001679{
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001680 Error *err = NULL;
Peter Maydell39e0b032016-07-04 13:06:35 +01001681 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001682 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001683 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001684 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001685 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001686 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001687 mr->destructor = memory_region_destructor_ram;
Igor Mammedov1cd3d492018-09-04 14:39:37 +02001688 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1689 if (err) {
1690 mr->size = int128_zero();
1691 object_unparent(OBJECT(mr));
1692 error_propagate(errp, err);
1693 }
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001694}
1695
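/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * A ROM device reads like RAM while romd_mode is set, but writes always go
 * through the supplied callbacks (the usual pattern for flash devices).
 * The names are hypothetical and program_ops is assumed to be provided by
 * the caller.
 */
static void example_init_flashlike_rom(MemoryRegion *mr, Object *owner,
                                       const MemoryRegionOps *program_ops,
                                       void *opaque, Error **errp)
{
    memory_region_init_rom_device_nomigrate(mr, owner, program_ops, opaque,
                                            "example-flash", 64 * 1024, errp);
    /* Leaving ROMD mode routes reads through the callbacks as well. */
    memory_region_rom_device_set_romd(mr, false);
}
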
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001696void memory_region_init_iommu(void *_iommu_mr,
1697 size_t instance_size,
1698 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001699 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001700 const char *name,
1701 uint64_t size)
1702{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001703 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001704 struct MemoryRegion *mr;
1705
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001706 object_initialize(_iommu_mr, instance_size, mrtypename);
1707 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001708 memory_region_do_init(mr, owner, name, size);
1709 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001710 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001711 QLIST_INIT(&iommu_mr->iommu_notify);
1712 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001713}
1714
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001715static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001716{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001717 MemoryRegion *mr = MEMORY_REGION(obj);
1718
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001719 assert(!mr->container);
1720
 1721    /* We know the region is not visible in any address space (it
 1722     * does not have a container and cannot be a root either, because
 1723     * it has no references), so we can blindly clear mr->enabled.
 1724     * Calling memory_region_set_enabled instead could trigger a
 1725     * transaction and cause an infinite loop.
 1726     */
1727 mr->enabled = false;
1728 memory_region_transaction_begin();
1729 while (!QTAILQ_EMPTY(&mr->subregions)) {
1730 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1731 memory_region_del_subregion(mr, subregion);
1732 }
1733 memory_region_transaction_commit();
1734
Avi Kivity545e92e2011-08-08 19:58:48 +03001735 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001736 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001737 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001738 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001739}
1740
Paolo Bonzini803c0812013-05-07 06:59:09 +02001741Object *memory_region_owner(MemoryRegion *mr)
1742{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001743 Object *obj = OBJECT(mr);
1744 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001745}
1746
Paolo Bonzini46637be2013-05-07 09:06:00 +02001747void memory_region_ref(MemoryRegion *mr)
1748{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001749 /* MMIO callbacks most likely will access data that belongs
1750 * to the owner, hence the need to ref/unref the owner whenever
1751 * the memory region is in use.
1752 *
1753 * The memory region is a child of its owner. As long as the
1754 * owner doesn't call unparent itself on the memory region,
1755 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001756 * Memory regions without an owner are supposed to never go away;
 1757     * we do not ref/unref them because doing so would noticeably slow down DMA.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001758 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001759 if (mr && mr->owner) {
1760 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001761 }
1762}
1763
1764void memory_region_unref(MemoryRegion *mr)
1765{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001766 if (mr && mr->owner) {
1767 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001768 }
1769}
1770
Avi Kivity093bc2c2011-07-26 14:26:01 +03001771uint64_t memory_region_size(MemoryRegion *mr)
1772{
Avi Kivity08dafab2011-10-16 13:19:17 +02001773 if (int128_eq(mr->size, int128_2_64())) {
1774 return UINT64_MAX;
1775 }
1776 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001777}
1778
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001779const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001780{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001781 if (!mr->name) {
1782 ((MemoryRegion *)mr)->name =
1783 object_get_canonical_path_component(OBJECT(mr));
1784 }
Peter Maydell302fa282014-08-19 20:05:46 +01001785 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001786}
1787
Alex Williamson21e00fa2016-10-31 09:53:03 -06001788bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301789{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001790 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301791}
1792
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001793uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001794{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001795 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001796 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001797 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1798 }
1799 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001800}
1801
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001802bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1803{
1804 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1805}
1806
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001807static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001808{
1809 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1810 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001811 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001812
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001813 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001814 flags |= iommu_notifier->notifier_flags;
1815 }
1816
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001817 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1818 imrc->notify_flag_changed(iommu_mr,
1819 iommu_mr->iommu_notify_flags,
1820 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001821 }
1822
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001823 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001824}
1825
Peter Xucdb30812016-09-23 13:02:26 +08001826void memory_region_register_iommu_notifier(MemoryRegion *mr,
1827 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001828{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001829 IOMMUMemoryRegion *iommu_mr;
1830
Jason Wangefcd38c2016-12-30 18:09:17 +08001831 if (mr->alias) {
1832 memory_region_register_iommu_notifier(mr->alias, n);
1833 return;
1834 }
1835
Peter Xucdb30812016-09-23 13:02:26 +08001836 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001837 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001838 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001839 assert(n->start <= n->end);
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001840 assert(n->iommu_idx >= 0 &&
1841 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1842
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001843 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1844 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001845}
1846
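/*
 * Editor's note: an illustrative sketch of registering a MAP/UNMAP notifier
 * on an IOMMU region, roughly what VFIO and vhost do; not part of the
 * original file, and all names are hypothetical.
 */
static void example_watch_iommu(MemoryRegion *iommu_root, IOMMUNotifier *n,
                                IOMMUNotify fn)
{
    n->notify = fn;
    n->notifier_flags = IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP;
    n->start = 0;
    n->end = HWADDR_MAX;
    n->iommu_idx = 0;   /* or memory_region_iommu_attrs_to_index(...) */

    memory_region_register_iommu_notifier(iommu_root, n);
}
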
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001847uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001848{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001849 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1850
1851 if (imrc->get_min_page_size) {
1852 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001853 }
1854 return TARGET_PAGE_SIZE;
1855}
1856
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001857void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001858{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001859 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001860 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001861 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001862 IOMMUTLBEntry iotlb;
1863
Peter Xufaa362e2017-04-07 18:59:11 +08001864 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001865 if (imrc->replay) {
1866 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001867 return;
1868 }
1869
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001870 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001871
David Gibsona788f222015-09-30 12:13:55 +10001872 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Peter Maydell2c91bcf2018-06-15 14:57:16 +01001873 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
David Gibsona788f222015-09-30 12:13:55 +10001874 if (iotlb.perm != IOMMU_NONE) {
1875 n->notify(n, &iotlb);
1876 }
1877
 1878        /* If (2^64 - MR size) < granularity, it's possible to get an
 1879         * infinite loop here; this check catches such a wraparound. */
1880 if ((addr + granularity) < addr) {
1881 break;
1882 }
1883 }
1884}
1885
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001886void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001887{
1888 IOMMUNotifier *notifier;
1889
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001890 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1891 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001892 }
1893}
1894
Peter Xucdb30812016-09-23 13:02:26 +08001895void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1896 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001897{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001898 IOMMUMemoryRegion *iommu_mr;
1899
Jason Wangefcd38c2016-12-30 18:09:17 +08001900 if (mr->alias) {
1901 memory_region_unregister_iommu_notifier(mr->alias, n);
1902 return;
1903 }
Peter Xucdb30812016-09-23 13:02:26 +08001904 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001905 iommu_mr = IOMMU_MEMORY_REGION(mr);
1906 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001907}
1908
Peter Xubd2bfa42017-04-07 18:59:10 +08001909void memory_region_notify_one(IOMMUNotifier *notifier,
1910 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001911{
Peter Xucdb30812016-09-23 13:02:26 +08001912 IOMMUNotifierFlag request_flags;
1913
Peter Xubd2bfa42017-04-07 18:59:10 +08001914 /*
 1915     * Skip the notification if it does not overlap with the
 1916     * registered range.
1917 */
Maxime Coquelinb021d1c2017-10-10 11:42:47 +02001918 if (notifier->start > entry->iova + entry->addr_mask ||
Peter Xubd2bfa42017-04-07 18:59:10 +08001919 notifier->end < entry->iova) {
1920 return;
1921 }
Peter Xucdb30812016-09-23 13:02:26 +08001922
Peter Xubd2bfa42017-04-07 18:59:10 +08001923 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001924 request_flags = IOMMU_NOTIFIER_MAP;
1925 } else {
1926 request_flags = IOMMU_NOTIFIER_UNMAP;
1927 }
1928
Peter Xubd2bfa42017-04-07 18:59:10 +08001929 if (notifier->notifier_flags & request_flags) {
1930 notifier->notify(notifier, entry);
1931 }
1932}
1933
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001934void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001935 int iommu_idx,
Peter Xubd2bfa42017-04-07 18:59:10 +08001936 IOMMUTLBEntry entry)
1937{
1938 IOMMUNotifier *iommu_notifier;
1939
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001940 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001941
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001942 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Maydellcb1efcf2018-06-15 14:57:16 +01001943 if (iommu_notifier->iommu_idx == iommu_idx) {
1944 memory_region_notify_one(iommu_notifier, &entry);
1945 }
Peter Xucdb30812016-09-23 13:02:26 +08001946 }
David Gibson06866572013-05-14 19:13:56 +10001947}
1948
Alexey Kardashevskiyf1334de2018-02-06 11:08:24 -07001949int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1950 enum IOMMUMemoryRegionAttr attr,
1951 void *data)
1952{
1953 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1954
1955 if (!imrc->get_attr) {
1956 return -EINVAL;
1957 }
1958
1959 return imrc->get_attr(iommu_mr, attr, data);
1960}
1961
Peter Maydell21f40202018-06-15 14:57:15 +01001962int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1963 MemTxAttrs attrs)
1964{
1965 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1966
1967 if (!imrc->attrs_to_index) {
1968 return 0;
1969 }
1970
1971 return imrc->attrs_to_index(iommu_mr, attrs);
1972}
1973
1974int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
1975{
1976 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1977
1978 if (!imrc->num_indexes) {
1979 return 1;
1980 }
1981
1982 return imrc->num_indexes(iommu_mr);
1983}
1984
Avi Kivity093bc2c2011-07-26 14:26:01 +03001985void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1986{
Avi Kivity5a583342011-07-26 14:26:02 +03001987 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001988 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001989
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001990 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001991 old_logging = mr->vga_logging_count;
1992 mr->vga_logging_count += log ? 1 : -1;
1993 if (!!old_logging == !!mr->vga_logging_count) {
1994 return;
1995 }
1996
Jan Kiszka59023ef2012-08-23 13:02:30 +02001997 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001998 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001999 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002000 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002001}
2002
Avi Kivitya8170e52012-10-23 12:30:10 +02002003bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
2004 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002005{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002006 assert(mr->ram_block);
2007 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
2008 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002009}
2010
Avi Kivitya8170e52012-10-23 12:30:10 +02002011void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2012 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002013{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002014 assert(mr->ram_block);
2015 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2016 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01002017 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002018}
2019
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002020static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002021{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002022 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02002023 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002024 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03002025 FlatRange *fr;
2026
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002027 /* If the same address space has multiple log_sync listeners, we
2028 * visit that address space's FlatView multiple times. But because
2029 * log_sync listeners are rare, it's still cheaper than walking each
2030 * address space once.
2031 */
2032 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2033 if (!listener->log_sync) {
2034 continue;
2035 }
2036 as = listener->address_space;
2037 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002038 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002039 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002040 MemoryRegionSection mrs = section_from_flat_range(fr, view);
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02002041 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02002042 }
Avi Kivity5a583342011-07-26 14:26:02 +03002043 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002044 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03002045 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002046}
2047
Paolo Bonzini0fe1eca2018-02-06 18:24:13 +01002048DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2049 hwaddr addr,
2050 hwaddr size,
2051 unsigned client)
2052{
2053 assert(mr->ram_block);
2054 memory_region_sync_dirty_bitmap(mr);
2055 return cpu_physical_memory_snapshot_and_clear_dirty(
2056 memory_region_get_ram_addr(mr) + addr, size, client);
2057}
2058
2059bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2060 hwaddr addr, hwaddr size)
2061{
2062 assert(mr->ram_block);
2063 return cpu_physical_memory_snapshot_get_dirty(snap,
2064 memory_region_get_ram_addr(mr) + addr, size);
2065}
2066
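/*
 * Editor's note: an illustrative sketch of the VGA dirty-tracking flow
 * implemented above, roughly what a display device does once per refresh;
 * not part of the original file, and the names are hypothetical.
 */
static void example_scan_vram(MemoryRegion *vram, hwaddr vram_size)
{
    DirtyBitmapSnapshot *snap;

    /* Enable VGA dirty logging once, e.g. at device realize time. */
    memory_region_set_log(vram, true, DIRTY_MEMORY_VGA);

    /* Per refresh: grab-and-clear the bitmap, then test the drawn ranges. */
    snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
                                                  DIRTY_MEMORY_VGA);
    if (memory_region_snapshot_get_dirty(vram, snap, 0, vram_size)) {
        /* ...redraw the dirty area... */
    }
    g_free(snap);
}
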
Avi Kivity093bc2c2011-07-26 14:26:01 +03002067void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2068{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002069 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002070 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002071 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01002072 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002073 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03002074 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002075}
2076
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002077void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002078{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002079 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02002080 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02002081 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01002082 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002083 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03002084 }
2085}
2086
Avi Kivitya8170e52012-10-23 12:30:10 +02002087void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2088 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002089{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002090 assert(mr->ram_block);
2091 cpu_physical_memory_test_and_clear_dirty(
2092 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002093}
2094
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002095int memory_region_get_fd(MemoryRegion *mr)
2096{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002097 int fd;
2098
2099 rcu_read_lock();
2100 while (mr->alias) {
2101 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002102 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002103 fd = mr->ram_block->fd;
2104 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002105
Paolo Bonzini4ff87572016-03-25 12:30:16 +01002106 return fd;
2107}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08002108
Avi Kivity093bc2c2011-07-26 14:26:01 +03002109void *memory_region_get_ram_ptr(MemoryRegion *mr)
2110{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002111 void *ptr;
2112 uint64_t offset = 0;
2113
2114 rcu_read_lock();
2115 while (mr->alias) {
2116 offset += mr->alias_offset;
2117 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002118 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08002119 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002120 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002121 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002122
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002123 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002124}
2125
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002126MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2127{
2128 RAMBlock *block;
2129
2130 block = qemu_ram_block_from_host(ptr, false, offset);
2131 if (!block) {
2132 return NULL;
2133 }
2134
2135 return block->mr;
2136}
2137
Fam Zheng7ebb2742016-03-01 14:18:20 +08002138ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2139{
2140 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2141}
2142
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002143void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2144{
Fam Zheng8e41fb62016-03-01 14:18:21 +08002145 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002146
Gongleifa53a0e2016-05-10 10:04:59 +08002147 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01002148}
2149
Avi Kivity0d673e32012-10-02 15:28:50 +02002150static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002151{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002152 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002153 FlatRange *fr;
2154 CoalescedMemoryRange *cmr;
2155 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02002156 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002157
Paolo Bonzini856d7242013-05-06 11:57:21 +02002158 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002159 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002160 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02002161 section = (MemoryRegionSection) {
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002162 .fv = view,
Avi Kivity95d29942012-10-02 18:21:54 +02002163 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002164 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02002165 };
2166
Paolo Bonzini9a546352016-09-22 16:23:06 +02002167 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002168 int128_get64(fr->addr.start),
2169 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002170 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
2171 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02002172 int128_sub(fr->addr.start,
2173 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002174 if (!addrrange_intersects(tmp, fr->addr)) {
2175 continue;
2176 }
2177 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002178 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02002179 int128_get64(tmp.start),
2180 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002181 }
2182 }
2183 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002184 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002185}
2186
Avi Kivity0d673e32012-10-02 15:28:50 +02002187static void memory_region_update_coalesced_range(MemoryRegion *mr)
2188{
2189 AddressSpace *as;
2190
2191 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2192 memory_region_update_coalesced_range_as(mr, as);
2193 }
2194}
2195
Avi Kivity093bc2c2011-07-26 14:26:01 +03002196void memory_region_set_coalescing(MemoryRegion *mr)
2197{
2198 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02002199 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002200}
2201
2202void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002203 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002204 uint64_t size)
2205{
Anthony Liguori7267c092011-08-20 22:09:37 -05002206 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002207
Avi Kivity08dafab2011-10-16 13:19:17 +02002208 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03002209 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2210 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02002211 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002212}
2213
2214void memory_region_clear_coalescing(MemoryRegion *mr)
2215{
2216 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002217 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002218
Jan Kiszkad4105152012-08-23 13:02:29 +02002219 qemu_flush_coalesced_mmio_buffer();
2220 mr->flush_coalesced_mmio = false;
2221
Avi Kivity093bc2c2011-07-26 14:26:01 +03002222 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2223 cmr = QTAILQ_FIRST(&mr->coalesced);
2224 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002225 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002226 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002227 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002228
2229 if (updated) {
2230 memory_region_update_coalesced_range(mr);
2231 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002232}
2233
Jan Kiszkad4105152012-08-23 13:02:29 +02002234void memory_region_set_flush_coalesced(MemoryRegion *mr)
2235{
2236 mr->flush_coalesced_mmio = true;
2237}
2238
2239void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2240{
2241 qemu_flush_coalesced_mmio_buffer();
2242 if (QTAILQ_EMPTY(&mr->coalesced)) {
2243 mr->flush_coalesced_mmio = false;
2244 }
2245}
2246
Jan Kiszka196ea132015-06-18 18:47:20 +02002247void memory_region_clear_global_locking(MemoryRegion *mr)
2248{
2249 mr->global_locking = false;
2250}
2251
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002252static bool userspace_eventfd_warning;
2253
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002254void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002255 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002256 unsigned size,
2257 bool match_data,
2258 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002259 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002260{
2261 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002262 .addr.start = int128_make64(addr),
2263 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002264 .match_data = match_data,
2265 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002266 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002267 };
2268 unsigned i;
2269
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002270 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2271 userspace_eventfd_warning))) {
2272 userspace_eventfd_warning = true;
2273 error_report("Using eventfd without MMIO binding in KVM. "
2274 "Suboptimal performance expected");
2275 }
2276
Jason Wangb8aecea2015-11-06 16:02:45 +08002277 if (size) {
2278 adjust_endianness(mr, &mrfd.data, size);
2279 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002280 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002281 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002282 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002283 break;
2284 }
2285 }
2286 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002287 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002288 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2289 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2290 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2291 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002292 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002293 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002294}
2295
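/*
 * Editor's note: an illustrative sketch of wiring a doorbell register to an
 * eventfd, as virtio-style devices do; not part of the original file.  The
 * names are hypothetical and @e is assumed to have been set up with
 * event_notifier_init().
 */
static void example_add_doorbell(MemoryRegion *mr, EventNotifier *e)
{
    /*
     * A 2-byte write of the value 1 to offset 0x10 of @mr now signals @e;
     * with KVM MMIO binding this happens without a return to userspace.
     */
    memory_region_add_eventfd(mr, 0x10, 2, true, 1, e);
}
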
2296void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002297 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002298 unsigned size,
2299 bool match_data,
2300 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002301 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002302{
2303 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002304 .addr.start = int128_make64(addr),
2305 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002306 .match_data = match_data,
2307 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002308 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002309 };
2310 unsigned i;
2311
Jason Wangb8aecea2015-11-06 16:02:45 +08002312 if (size) {
2313 adjust_endianness(mr, &mrfd.data, size);
2314 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002315 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002316 for (i = 0; i < mr->ioeventfd_nb; ++i) {
Tristan Burgess73bb7532018-05-28 23:04:45 -04002317 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002318 break;
2319 }
2320 }
2321 assert(i != mr->ioeventfd_nb);
2322 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2323 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2324 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002325 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002326 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002327 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002328 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002329}
2330
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002331static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002332{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002333 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002334 MemoryRegion *other;
2335
Jan Kiszka59023ef2012-08-23 13:02:30 +02002336 memory_region_transaction_begin();
2337
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002338 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002339 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002340 if (subregion->priority >= other->priority) {
2341 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2342 goto done;
2343 }
2344 }
2345 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2346done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002347 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002348 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002349}
2350
Peter Crosthwaite05987012014-06-05 23:14:44 -07002351static void memory_region_add_subregion_common(MemoryRegion *mr,
2352 hwaddr offset,
2353 MemoryRegion *subregion)
2354{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002355 assert(!subregion->container);
2356 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002357 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002358 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002359}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002360
2361void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002362 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002363 MemoryRegion *subregion)
2364{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002365 subregion->priority = 0;
2366 memory_region_add_subregion_common(mr, offset, subregion);
2367}
2368
2369void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002370 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002371 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002372 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002373{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002374 subregion->priority = priority;
2375 memory_region_add_subregion_common(mr, offset, subregion);
2376}
2377
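/*
 * Editor's note: an illustrative sketch of composing a board address space
 * from subregions; where regions overlap, the higher priority wins.  Not
 * part of the original file; the names and addresses are hypothetical.
 */
static void example_build_board_memory(MemoryRegion *ram,
                                       MemoryRegion *mmio_hole)
{
    MemoryRegion *sysmem = get_system_memory();

    /* RAM covers the low part of the address space... */
    memory_region_add_subregion(sysmem, 0, ram);
    /* ...but the MMIO hole takes precedence where the two overlap. */
    memory_region_add_subregion_overlap(sysmem, 0xe0000000, mmio_hole, 1);
}
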
2378void memory_region_del_subregion(MemoryRegion *mr,
2379 MemoryRegion *subregion)
2380{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002381 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002382 assert(subregion->container == mr);
2383 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002384 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002385 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002386 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002387 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002388}
2389
2390void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2391{
2392 if (enabled == mr->enabled) {
2393 return;
2394 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002395 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002396 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002397 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002398 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002399}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002400
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002401void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2402{
2403 Int128 s = int128_make64(size);
2404
2405 if (size == UINT64_MAX) {
2406 s = int128_2_64();
2407 }
2408 if (int128_eq(s, mr->size)) {
2409 return;
2410 }
2411 memory_region_transaction_begin();
2412 mr->size = s;
2413 memory_region_update_pending = true;
2414 memory_region_transaction_commit();
2415}
2416
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002417static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002418{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002419 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002420
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002421 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002422 memory_region_transaction_begin();
2423 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002424 memory_region_del_subregion(container, mr);
2425 mr->container = container;
2426 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002427 memory_region_unref(mr);
2428 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002429 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002430}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002431
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002432void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2433{
2434 if (addr != mr->addr) {
2435 mr->addr = addr;
2436 memory_region_readd_subregion(mr);
2437 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002438}
2439
Avi Kivitya8170e52012-10-23 12:30:10 +02002440void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002441{
Avi Kivity47033592011-12-04 19:16:50 +02002442 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002443
Jan Kiszka59023ef2012-08-23 13:02:30 +02002444 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002445 return;
2446 }
2447
Jan Kiszka59023ef2012-08-23 13:02:30 +02002448 memory_region_transaction_begin();
2449 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002450 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002451 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002452}
2453
Igor Mammedova2b257d2014-10-31 16:38:37 +00002454uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2455{
2456 return mr->align;
2457}
2458
Avi Kivitye2177952011-12-08 15:00:18 +02002459static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2460{
2461 const AddrRange *addr = addr_;
2462 const FlatRange *fr = fr_;
2463
2464 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2465 return -1;
2466 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2467 return 1;
2468 }
2469 return 0;
2470}
2471
Paolo Bonzini99e86342013-05-06 10:26:13 +02002472static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002473{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002474 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002475 sizeof(FlatRange), cmp_flatrange_addr);
2476}
2477
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002478bool memory_region_is_mapped(MemoryRegion *mr)
2479{
2480 return mr->container ? true : false;
2481}
2482
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002483/* Same as memory_region_find, but it does not add a reference to the
2484 * returned region. It must be called from an RCU critical section.
2485 */
2486static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2487 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002488{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002489 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002490 MemoryRegion *root;
2491 AddressSpace *as;
2492 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002493 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002494 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002495
Paolo Bonzini73034e92013-05-07 15:48:28 +02002496 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002497 for (root = mr; root->container; ) {
2498 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002499 addr += root->addr;
2500 }
2501
2502 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002503 if (!as) {
2504 return ret;
2505 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002506 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002507
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002508 view = address_space_to_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002509 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002510 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002511 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002512 }
2513
Paolo Bonzini99e86342013-05-06 10:26:13 +02002514 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002515 --fr;
2516 }
2517
2518 ret.mr = fr->mr;
Alexey Kardashevskiy16620682017-09-21 18:50:58 +10002519 ret.fv = view;
Avi Kivitye2177952011-12-08 15:00:18 +02002520 range = addrrange_intersection(range, fr->addr);
2521 ret.offset_within_region = fr->offset_in_region;
2522 ret.offset_within_region += int128_get64(int128_sub(range.start,
2523 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002524 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002525 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002526 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002527 return ret;
2528}
2529
2530MemoryRegionSection memory_region_find(MemoryRegion *mr,
2531 hwaddr addr, uint64_t size)
2532{
2533 MemoryRegionSection ret;
2534 rcu_read_lock();
2535 ret = memory_region_find_rcu(mr, addr, size);
2536 if (ret.mr) {
2537 memory_region_ref(ret.mr);
2538 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002539 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002540 return ret;
2541}
2542
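/*
 * Usage sketch (illustrative only; the function and variable names are
 * hypothetical): memory_region_find() takes a reference on the returned
 * section's MemoryRegion, so callers must pair it with memory_region_unref().
 */
#if 0
static void example_lookup(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection section = memory_region_find(root, addr, 4);

    if (!section.mr) {
        return;                      /* nothing is mapped at this address */
    }
    /* section.offset_within_region locates the found range inside section.mr */
    memory_region_unref(section.mr); /* drop the reference taken by _find()   */
}
#endif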
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002543bool memory_region_present(MemoryRegion *container, hwaddr addr)
2544{
2545 MemoryRegion *mr;
2546
2547 rcu_read_lock();
2548 mr = memory_region_find_rcu(container, addr, 1).mr;
2549 rcu_read_unlock();
2550 return mr && mr != container;
2551}
2552
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002553void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002554{
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002555 memory_region_sync_dirty_bitmap(NULL);
Avi Kivity7664e802011-12-11 14:47:25 +02002556}
2557
Jay Zhou19310762017-07-28 18:28:53 +08002558static VMChangeStateEntry *vmstate_change;
2559
Avi Kivity7664e802011-12-11 14:47:25 +02002560void memory_global_dirty_log_start(void)
2561{
Jay Zhou19310762017-07-28 18:28:53 +08002562 if (vmstate_change) {
2563 qemu_del_vm_change_state_handler(vmstate_change);
2564 vmstate_change = NULL;
2565 }
2566
Avi Kivity7664e802011-12-11 14:47:25 +02002567 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002568
Avi Kivity7376e582012-02-08 21:05:17 +02002569 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002570
2571 /* Refresh DIRTY_LOG_MIGRATION bit. */
2572 memory_region_transaction_begin();
2573 memory_region_update_pending = true;
2574 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002575}
2576
Jay Zhou19310762017-07-28 18:28:53 +08002577static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002578{
Avi Kivity7664e802011-12-11 14:47:25 +02002579 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002580
2581 /* Refresh DIRTY_LOG_MIGRATION bit. */
2582 memory_region_transaction_begin();
2583 memory_region_update_pending = true;
2584 memory_region_transaction_commit();
2585
Avi Kivity7376e582012-02-08 21:05:17 +02002586 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002587}
2588
Jay Zhou19310762017-07-28 18:28:53 +08002589static void memory_vm_change_state_handler(void *opaque, int running,
2590 RunState state)
2591{
2592 if (running) {
2593 memory_global_dirty_log_do_stop();
2594
2595 if (vmstate_change) {
2596 qemu_del_vm_change_state_handler(vmstate_change);
2597 vmstate_change = NULL;
2598 }
2599 }
2600}
2601
2602void memory_global_dirty_log_stop(void)
2603{
2604 if (!runstate_is_running()) {
2605 if (vmstate_change) {
2606 return;
2607 }
2608 vmstate_change = qemu_add_vm_change_state_handler(
2609 memory_vm_change_state_handler, NULL);
2610 return;
2611 }
2612
2613 memory_global_dirty_log_do_stop();
2614}
2615
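/*
 * Illustrative sketch of how a dirty-log client (live migration is the
 * typical one) drives the functions above; the real loop lives in the
 * migration code and is only outlined here.
 */
#if 0
static void example_dirty_log_cycle(void)
{
    memory_global_dirty_log_start();  /* ask all listeners to start logging   */
    /* ... while dirty pages remain: */
    memory_global_dirty_log_sync();   /* pull dirty bits into the RAM bitmaps */
    /* ... copy the dirty pages ...  */
    memory_global_dirty_log_stop();   /* immediate, or deferred until the VM
                                       * next resumes running (see above)     */
}
#endif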
Avi Kivity7664e802011-12-11 14:47:25 +02002616static void listener_add_address_space(MemoryListener *listener,
2617 AddressSpace *as)
2618{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002619 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002620 FlatRange *fr;
2621
Paolo Bonzini680a4782015-11-02 09:23:52 +01002622 if (listener->begin) {
2623 listener->begin(listener);
2624 }
Avi Kivity7664e802011-12-11 14:47:25 +02002625 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002626 if (listener->log_global_start) {
2627 listener->log_global_start(listener);
2628 }
Avi Kivity7664e802011-12-11 14:47:25 +02002629 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002630
Paolo Bonzini856d7242013-05-06 11:57:21 +02002631 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002632 FOR_EACH_FLAT_RANGE(fr, view) {
David Hildenbrand279836f2017-10-16 16:43:02 +02002633 MemoryRegionSection section = section_from_flat_range(fr, view);
2634
Avi Kivity975aefe2012-10-02 16:39:57 +02002635 if (listener->region_add) {
2636 listener->region_add(listener, &section);
2637 }
David Hildenbrandae990e62017-10-16 16:42:56 +02002638 if (fr->dirty_log_mask && listener->log_start) {
2639 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2640 }
Avi Kivity7664e802011-12-11 14:47:25 +02002641 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002642 if (listener->commit) {
2643 listener->commit(listener);
2644 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002645 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002646}
2647
Peter Xud25836c2018-01-22 14:02:44 +08002648static void listener_del_address_space(MemoryListener *listener,
2649 AddressSpace *as)
2650{
2651 FlatView *view;
2652 FlatRange *fr;
2653
2654 if (listener->begin) {
2655 listener->begin(listener);
2656 }
2657 view = address_space_get_flatview(as);
2658 FOR_EACH_FLAT_RANGE(fr, view) {
2659 MemoryRegionSection section = section_from_flat_range(fr, view);
2660
2661 if (fr->dirty_log_mask && listener->log_stop) {
2662 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2663 }
2664 if (listener->region_del) {
2665 listener->region_del(listener, &section);
2666 }
2667 }
2668 if (listener->commit) {
2669 listener->commit(listener);
2670 }
2671 flatview_unref(view);
2672}
2673
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002674void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002675{
Avi Kivity72e22d22012-02-08 15:05:50 +02002676 MemoryListener *other = NULL;
2677
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002678 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002679 if (QTAILQ_EMPTY(&memory_listeners)
2680 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2681 memory_listeners)->priority) {
2682 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2683 } else {
2684 QTAILQ_FOREACH(other, &memory_listeners, link) {
2685 if (listener->priority < other->priority) {
2686 break;
2687 }
2688 }
2689 QTAILQ_INSERT_BEFORE(other, listener, link);
2690 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002691
Paolo Bonzini9a546352016-09-22 16:23:06 +02002692 if (QTAILQ_EMPTY(&as->listeners)
2693 || listener->priority >= QTAILQ_LAST(&as->listeners,
2694 memory_listeners)->priority) {
2695 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2696 } else {
2697 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2698 if (listener->priority < other->priority) {
2699 break;
2700 }
2701 }
2702 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2703 }
2704
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002705 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002706}
2707
2708void memory_listener_unregister(MemoryListener *listener)
2709{
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002710 if (!listener->address_space) {
2711 return;
2712 }
2713
Peter Xud25836c2018-01-22 14:02:44 +08002714 listener_del_address_space(listener, listener->address_space);
Avi Kivity72e22d22012-02-08 15:05:50 +02002715 QTAILQ_REMOVE(&memory_listeners, listener, link);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002716 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002717 listener->address_space = NULL;
Avi Kivity86e775c2011-12-15 16:24:49 +02002718}
Avi Kivitye2177952011-12-08 15:00:18 +02002719
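/*
 * Illustrative sketch of a minimal listener (hypothetical names): callbacks
 * that are left NULL are simply skipped, and 'priority' orders this listener
 * against the others registered on the same address space.
 */
#if 0
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* a range described by 'section' became visible in the address space */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

static void example_register(void)
{
    memory_listener_register(&example_listener, &address_space_memory);
    /* ... and when the client goes away: */
    memory_listener_unregister(&example_listener);
}
#endif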
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002720void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002721{
Paolo Bonziniac951902015-02-11 15:21:04 +01002722 memory_region_ref(root);
Avi Kivity8786db72012-10-02 13:53:41 +02002723 as->root = root;
Alexey Kardashevskiy67ace392017-09-21 18:51:05 +10002724 as->current_map = NULL;
Avi Kivity4c19eb72012-10-30 13:47:44 +02002725 as->ioeventfd_nb = 0;
2726 as->ioeventfds = NULL;
Paolo Bonzini9a546352016-09-22 16:23:06 +02002727 QTAILQ_INIT(&as->listeners);
Avi Kivity0d673e32012-10-02 15:28:50 +02002728 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002729 as->name = g_strdup(name ? name : "anonymous");
Alexey Kardashevskiy202fc012017-09-21 18:51:09 +10002730 address_space_update_topology(as);
2731 address_space_update_ioeventfds(as);
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002732}
Avi Kivity658b2222011-07-26 14:26:08 +03002733
Paolo Bonzini374f2982013-05-17 12:37:03 +02002734static void do_address_space_destroy(AddressSpace *as)
Avi Kivity83f3c252012-10-07 12:59:55 +02002735{
Paolo Bonzini9a546352016-09-22 16:23:06 +02002736 assert(QTAILQ_EMPTY(&as->listeners));
David Gibson078c44f2014-05-30 12:59:00 -06002737
Paolo Bonzini856d7242013-05-06 11:57:21 +02002738 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002739 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002740 g_free(as->ioeventfds);
Paolo Bonziniac951902015-02-11 15:21:04 +01002741 memory_region_unref(as->root);
Avi Kivity83f3c252012-10-07 12:59:55 +02002742}
2743
Paolo Bonzini374f2982013-05-17 12:37:03 +02002744void address_space_destroy(AddressSpace *as)
2745{
Paolo Bonziniac951902015-02-11 15:21:04 +01002746 MemoryRegion *root = as->root;
2747
Paolo Bonzini374f2982013-05-17 12:37:03 +02002748 /* Flush out anything from MemoryListeners listening in on this */
2749 memory_region_transaction_begin();
2750 as->root = NULL;
2751 memory_region_transaction_commit();
2752 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2753
2754 /* At this point, as->dispatch and as->current_map are dummy
2755 * entries that the guest should never use. Wait for the old
2756 * values to expire before freeing the data.
2757 */
Paolo Bonziniac951902015-02-11 15:21:04 +01002758 as->root = root;
Paolo Bonzini374f2982013-05-17 12:37:03 +02002759 call_rcu(as, do_address_space_destroy, rcu);
2760}
2761
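/*
 * Illustrative sketch of an address space life cycle (hypothetical names):
 * a root container region is created, wrapped in an AddressSpace, and the
 * AddressSpace is destroyed again when its user disappears.
 */
#if 0
static MemoryRegion example_root;
static AddressSpace example_as;

static void example_as_create(Object *owner)
{
    memory_region_init(&example_root, owner, "example-root", UINT64_MAX);
    address_space_init(&example_as, &example_root, "example");
}

static void example_as_teardown(void)
{
    address_space_destroy(&example_as);
}
#endif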
Peter Xu4e831902017-01-16 16:40:04 +08002762static const char *memory_region_type(MemoryRegion *mr)
2763{
2764 if (memory_region_is_ram_device(mr)) {
2765 return "ramd";
2766 } else if (memory_region_is_romd(mr)) {
2767 return "romd";
2768 } else if (memory_region_is_rom(mr)) {
2769 return "rom";
2770 } else if (memory_region_is_ram(mr)) {
2771 return "ram";
2772 } else {
2773 return "i/o";
2774 }
2775}
2776
Blue Swirl314e2982011-09-11 20:22:05 +00002777typedef struct MemoryRegionList MemoryRegionList;
2778
2779struct MemoryRegionList {
2780 const MemoryRegion *mr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002781 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
Blue Swirl314e2982011-09-11 20:22:05 +00002782};
2783
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002784typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
Blue Swirl314e2982011-09-11 20:22:05 +00002785
Peter Xu4e831902017-01-16 16:40:04 +08002786#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2787 int128_sub((size), int128_one())) : 0)
2788#define MTREE_INDENT " "
2789
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002790static void mtree_expand_owner(fprintf_function mon_printf, void *f,
2791 const char *label, Object *obj)
2792{
2793 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2794
2795 mon_printf(f, " %s:{%s", label, dev ? "dev" : "obj");
2796 if (dev && dev->id) {
2797 mon_printf(f, " id=%s", dev->id);
2798 } else {
2799 gchar *canonical_path = object_get_canonical_path(obj);
2800 if (canonical_path) {
2801 mon_printf(f, " path=%s", canonical_path);
2802 g_free(canonical_path);
2803 } else {
2804 mon_printf(f, " type=%s", object_get_typename(obj));
2805 }
2806 }
2807 mon_printf(f, "}");
2808}
2809
2810static void mtree_print_mr_owner(fprintf_function mon_printf, void *f,
2811 const MemoryRegion *mr)
2812{
2813 Object *owner = mr->owner;
2814 Object *parent = memory_region_owner((MemoryRegion *)mr);
2815
2816 if (!owner && !parent) {
2817 mon_printf(f, " orphan");
2818 return;
2819 }
2820 if (owner) {
2821 mtree_expand_owner(mon_printf, f, "owner", owner);
2822 }
2823 if (parent && parent != owner) {
2824 mtree_expand_owner(mon_printf, f, "parent", parent);
2825 }
2826}
2827
Blue Swirl314e2982011-09-11 20:22:05 +00002828static void mtree_print_mr(fprintf_function mon_printf, void *f,
2829 const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02002830 hwaddr base,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002831 MemoryRegionListHead *alias_print_queue,
2832 bool owner)
Blue Swirl314e2982011-09-11 20:22:05 +00002833{
Jan Kiszka9479c572011-09-27 15:00:41 +02002834 MemoryRegionList *new_ml, *ml, *next_ml;
2835 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00002836 const MemoryRegion *submr;
2837 unsigned int i;
Peter Xub31f8412017-03-14 20:56:27 +08002838 hwaddr cur_start, cur_end;
Blue Swirl314e2982011-09-11 20:22:05 +00002839
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002840 if (!mr) {
Blue Swirl314e2982011-09-11 20:22:05 +00002841 return;
2842 }
2843
2844 for (i = 0; i < level; i++) {
Peter Xu4e831902017-01-16 16:40:04 +08002845 mon_printf(f, MTREE_INDENT);
Blue Swirl314e2982011-09-11 20:22:05 +00002846 }
2847
Peter Xub31f8412017-03-14 20:56:27 +08002848 cur_start = base + mr->addr;
2849 cur_end = cur_start + MR_SIZE(mr->size);
2850
2851 /*
2852     * Try to detect overflow of the memory region. This should never
2853     * happen normally. If it does happen, print a marker so that the
2854     * user inspecting the output notices it.
2855 */
2856 if (cur_start < base || cur_end < cur_start) {
2857 mon_printf(f, "[DETECTED OVERFLOW!] ");
2858 }
2859
Blue Swirl314e2982011-09-11 20:22:05 +00002860 if (mr->alias) {
2861 MemoryRegionList *ml;
2862 bool found = false;
2863
2864 /* check if the alias is already in the queue */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002865 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
Paolo Bonzinif54bb152013-12-11 12:51:46 +01002866 if (ml->mr == mr->alias) {
Blue Swirl314e2982011-09-11 20:22:05 +00002867 found = true;
2868 }
2869 }
2870
2871 if (!found) {
2872 ml = g_new(MemoryRegionList, 1);
2873 ml->mr = mr->alias;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002874 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
Blue Swirl314e2982011-09-11 20:22:05 +00002875 }
Jan Kiszka4896d742012-02-04 16:25:42 +01002876 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
Peter Xu4e831902017-01-16 16:40:04 +08002877 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002878 "-" TARGET_FMT_plx "%s",
Peter Xub31f8412017-03-14 20:56:27 +08002879 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002880 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002881 memory_region_type((MemoryRegion *)mr),
Peter Crosthwaite3fb18b42014-08-14 23:55:36 -07002882 memory_region_name(mr),
2883 memory_region_name(mr->alias),
Blue Swirl314e2982011-09-11 20:22:05 +00002884 mr->alias_offset,
Peter Xu4e831902017-01-16 16:40:04 +08002885 mr->alias_offset + MR_SIZE(mr->size),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002886 mr->enabled ? "" : " [disabled]");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002887 if (owner) {
2888 mtree_print_mr_owner(mon_printf, f, mr);
2889 }
Blue Swirl314e2982011-09-11 20:22:05 +00002890 } else {
Jan Kiszka4896d742012-02-04 16:25:42 +01002891 mon_printf(f,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002892 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s",
Peter Xub31f8412017-03-14 20:56:27 +08002893 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002894 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002895 memory_region_type((MemoryRegion *)mr),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002896 memory_region_name(mr),
2897 mr->enabled ? "" : " [disabled]");
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002898 if (owner) {
2899 mtree_print_mr_owner(mon_printf, f, mr);
2900 }
Blue Swirl314e2982011-09-11 20:22:05 +00002901 }
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002902 mon_printf(f, "\n");
Jan Kiszka9479c572011-09-27 15:00:41 +02002903
2904 QTAILQ_INIT(&submr_print_queue);
2905
Blue Swirl314e2982011-09-11 20:22:05 +00002906 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002907 new_ml = g_new(MemoryRegionList, 1);
2908 new_ml->mr = submr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002909 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002910 if (new_ml->mr->addr < ml->mr->addr ||
2911 (new_ml->mr->addr == ml->mr->addr &&
2912 new_ml->mr->priority > ml->mr->priority)) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002913 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002914 new_ml = NULL;
2915 break;
2916 }
2917 }
2918 if (new_ml) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002919 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002920 }
2921 }
2922
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002923 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Peter Xub31f8412017-03-14 20:56:27 +08002924 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002925 alias_print_queue, owner);
Jan Kiszka9479c572011-09-27 15:00:41 +02002926 }
2927
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002928 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002929 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002930 }
2931}
2932
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002933struct FlatViewInfo {
2934 fprintf_function mon_printf;
2935 void *f;
2936 int counter;
2937 bool dispatch_tree;
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002938 bool owner;
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002939};
2940
2941static void mtree_print_flatview(gpointer key, gpointer value,
2942 gpointer user_data)
Peter Xu57bb40c2017-01-16 16:40:05 +08002943{
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002944 FlatView *view = key;
2945 GArray *fv_address_spaces = value;
2946 struct FlatViewInfo *fvi = user_data;
2947 fprintf_function p = fvi->mon_printf;
2948 void *f = fvi->f;
Peter Xu57bb40c2017-01-16 16:40:05 +08002949 FlatRange *range = &view->ranges[0];
2950 MemoryRegion *mr;
2951 int n = view->nr;
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002952 int i;
2953 AddressSpace *as;
2954
2955 p(f, "FlatView #%d\n", fvi->counter);
2956 ++fvi->counter;
2957
2958 for (i = 0; i < fv_address_spaces->len; ++i) {
2959 as = g_array_index(fv_address_spaces, AddressSpace*, i);
2960 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
2961 if (as->root->alias) {
2962 p(f, ", alias %s", memory_region_name(as->root->alias));
2963 }
2964 p(f, "\n");
2965 }
2966
2967 p(f, " Root memory region: %s\n",
2968 view->root ? memory_region_name(view->root) : "(none)");
Peter Xu57bb40c2017-01-16 16:40:05 +08002969
2970 if (n <= 0) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002971 p(f, MTREE_INDENT "No rendered FlatView\n\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08002972 return;
2973 }
2974
2975 while (n--) {
2976 mr = range->mr;
Paolo Bonzini377a07a2017-03-02 22:49:41 +01002977 if (range->offset_in_region) {
2978 p(f, MTREE_INDENT TARGET_FMT_plx "-"
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002979 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx,
Paolo Bonzini377a07a2017-03-02 22:49:41 +01002980 int128_get64(range->addr.start),
2981 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2982 mr->priority,
2983 range->readonly ? "rom" : memory_region_type(mr),
2984 memory_region_name(mr),
2985 range->offset_in_region);
2986 } else {
2987 p(f, MTREE_INDENT TARGET_FMT_plx "-"
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002988 TARGET_FMT_plx " (prio %d, %s): %s",
Paolo Bonzini377a07a2017-03-02 22:49:41 +01002989 int128_get64(range->addr.start),
2990 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2991 mr->priority,
2992 range->readonly ? "rom" : memory_region_type(mr),
2993 memory_region_name(mr));
2994 }
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10002995 if (fvi->owner) {
2996 mtree_print_mr_owner(p, f, mr);
2997 }
2998 p(f, "\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08002999 range++;
3000 }
3001
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003002#if !defined(CONFIG_USER_ONLY)
3003 if (fvi->dispatch_tree && view->root) {
3004 mtree_print_dispatch(p, f, view->dispatch, view->root);
3005 }
3006#endif
3007
3008 p(f, "\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08003009}
3010
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003011static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3012 gpointer user_data)
3013{
3014 FlatView *view = key;
3015 GArray *fv_address_spaces = value;
3016
3017 g_array_unref(fv_address_spaces);
3018 flatview_unref(view);
3019
3020 return true;
3021}
3022
3023void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003024 bool dispatch_tree, bool owner)
Blue Swirl314e2982011-09-11 20:22:05 +00003025{
3026 MemoryRegionListHead ml_head;
3027 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02003028 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00003029
Peter Xu57bb40c2017-01-16 16:40:05 +08003030 if (flatview) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003031 FlatView *view;
3032 struct FlatViewInfo fvi = {
3033 .mon_printf = mon_printf,
3034 .f = f,
3035 .counter = 0,
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003036 .dispatch_tree = dispatch_tree,
3037 .owner = owner,
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003038 };
3039 GArray *fv_address_spaces;
3040 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3041
3042 /* Gather all FVs in one table */
Peter Xu57bb40c2017-01-16 16:40:05 +08003043 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003044 view = address_space_get_flatview(as);
3045
3046 fv_address_spaces = g_hash_table_lookup(views, view);
3047 if (!fv_address_spaces) {
3048 fv_address_spaces = g_array_new(false, false, sizeof(as));
3049 g_hash_table_insert(views, view, fv_address_spaces);
3050 }
3051
3052 g_array_append_val(fv_address_spaces, as);
Peter Xu57bb40c2017-01-16 16:40:05 +08003053 }
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003054
3055 /* Print */
3056 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3057
3058 /* Free */
3059 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3060 g_hash_table_unref(views);
3061
Peter Xu57bb40c2017-01-16 16:40:05 +08003062 return;
3063 }
3064
Blue Swirl314e2982011-09-11 20:22:05 +00003065 QTAILQ_INIT(&ml_head);
3066
Avi Kivity0d673e32012-10-02 15:28:50 +02003067 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02003068 mon_printf(f, "address-space: %s\n", as->name);
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003069 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head, owner);
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02003070 mon_printf(f, "\n");
Blue Swirlb9f9be82012-03-10 16:58:35 +00003071 }
3072
Blue Swirl314e2982011-09-11 20:22:05 +00003073 /* print aliased regions */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003074 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02003075 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
Alexey Kardashevskiyfc051ae2018-06-04 13:25:11 +10003076 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head, owner);
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02003077 mon_printf(f, "\n");
Blue Swirl314e2982011-09-11 20:22:05 +00003078 }
3079
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003080 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02003081 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00003082 }
Blue Swirl314e2982011-09-11 20:22:05 +00003083}
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003084
Peter Maydellb08199c2017-07-07 15:42:51 +01003085void memory_region_init_ram(MemoryRegion *mr,
3086 struct Object *owner,
3087 const char *name,
3088 uint64_t size,
3089 Error **errp)
3090{
3091 DeviceState *owner_dev;
3092 Error *err = NULL;
3093
3094 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3095 if (err) {
3096 error_propagate(errp, err);
3097 return;
3098 }
3099 /* This will assert if owner is neither NULL nor a DeviceState.
3100 * We only want the owner here for the purposes of defining a
3101 * unique name for migration. TODO: Ideally we should implement
3102 * a naming scheme for Objects which are not DeviceStates, in
3103 * which case we can relax this restriction.
3104 */
3105 owner_dev = DEVICE(owner);
3106 vmstate_register_ram(mr, owner_dev);
3107}
3108
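/*
 * Usage sketch (illustrative only; 'dev', the size and the guest address are
 * hypothetical): the owner is a DeviceState so that the variant above can
 * register the RAM block for migration under a unique name.
 */
#if 0
static void example_realize(DeviceState *dev)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, OBJECT(dev), "example.ram", 0x10000,
                           &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0x40000000, ram);
}
#endif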
3109void memory_region_init_rom(MemoryRegion *mr,
3110 struct Object *owner,
3111 const char *name,
3112 uint64_t size,
3113 Error **errp)
3114{
3115 DeviceState *owner_dev;
3116 Error *err = NULL;
3117
3118 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3119 if (err) {
3120 error_propagate(errp, err);
3121 return;
3122 }
3123 /* This will assert if owner is neither NULL nor a DeviceState.
3124 * We only want the owner here for the purposes of defining a
3125 * unique name for migration. TODO: Ideally we should implement
3126 * a naming scheme for Objects which are not DeviceStates, in
3127 * which case we can relax this restriction.
3128 */
3129 owner_dev = DEVICE(owner);
3130 vmstate_register_ram(mr, owner_dev);
3131}
3132
3133void memory_region_init_rom_device(MemoryRegion *mr,
3134 struct Object *owner,
3135 const MemoryRegionOps *ops,
3136 void *opaque,
3137 const char *name,
3138 uint64_t size,
3139 Error **errp)
3140{
3141 DeviceState *owner_dev;
3142 Error *err = NULL;
3143
3144 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3145 name, size, &err);
3146 if (err) {
3147 error_propagate(errp, err);
3148 return;
3149 }
3150 /* This will assert if owner is neither NULL nor a DeviceState.
3151 * We only want the owner here for the purposes of defining a
3152 * unique name for migration. TODO: Ideally we should implement
3153 * a naming scheme for Objects which are not DeviceStates, in
3154 * which case we can relax this restriction.
3155 */
3156 owner_dev = DEVICE(owner);
3157 vmstate_register_ram(mr, owner_dev);
3158}
3159
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003160static const TypeInfo memory_region_info = {
3161 .parent = TYPE_OBJECT,
3162 .name = TYPE_MEMORY_REGION,
3163 .instance_size = sizeof(MemoryRegion),
3164 .instance_init = memory_region_initfn,
3165 .instance_finalize = memory_region_finalize,
3166};
3167
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003168static const TypeInfo iommu_memory_region_info = {
3169 .parent = TYPE_MEMORY_REGION,
3170 .name = TYPE_IOMMU_MEMORY_REGION,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003171 .class_size = sizeof(IOMMUMemoryRegionClass),
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003172 .instance_size = sizeof(IOMMUMemoryRegion),
3173 .instance_init = iommu_memory_region_initfn,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003174 .abstract = true,
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003175};
3176
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003177static void memory_register_types(void)
3178{
3179 type_register_static(&memory_region_info);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003180 type_register_static(&iommu_memory_region_info);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003181}
3182
3183type_init(memory_register_types)