/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

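/* Invoke a listener callback that takes no MemoryRegionSection argument on
 * every registered MemoryListener, walking the global list forward or in
 * reverse as requested.
 */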
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

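/* Strict ordering on MemoryRegionIoeventfds: compare by address range first,
 * then by match_data/data, then by the notifier pointer.  Keeping the fd
 * arrays sorted by this relation lets them be diffed with a single merge
 * pass in address_space_add_del_ioeventfds().
 */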
struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

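/* Allocate an empty FlatView with a single reference, pinning @mr_root
 * (which may be NULL for the shared empty view) as its root region.
 */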
static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

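/* Try to take a reference on @view; fails (returns false) if the refcount
 * has already dropped to zero and the view is being torn down.
 */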
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

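/* Translate a region-relative offset into an absolute guest address by
 * accumulating the offsets of all containing regions; used by the MMIO
 * trace points below.
 */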
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

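/* CPU index recorded by the trace points; -1 when the access does not
 * originate from a vCPU thread.
 */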
static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

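/* Split an access into pieces the device accepts (clamped between
 * access_size_min and access_size_max), call access_fn on each piece with
 * the shift appropriate for the region's endianness, and OR the MemTx
 * results together.
 */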
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   unsigned shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

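/* Walk down from @mr through fully-covering enabled aliases and single
 * enabled children so that address spaces whose roots resolve to the same
 * region can share one FlatView.  Returns NULL when nothing is enabled,
 * in which case the shared empty view is used.
 */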
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

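/* Recompute the ioeventfds visible through the address space's current
 * FlatView (translated to absolute addresses) and emit eventfd_add/del
 * listener callbacks for the difference from the previous set.
 */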
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

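/* Lazily create the global hash table that maps flat-view roots to cached
 * FlatViews.  The empty view (used by address spaces whose root resolves to
 * nothing enabled) is created once and kept referenced for the lifetime of
 * the process.
 */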
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

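/* Point @as at the FlatView cached for its root.  If the view actually
 * changes, replay the old->new difference to the address space's listeners
 * and publish the new view with RCU.
 */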
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

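/* Topology changes made between transaction begin and commit are batched:
 * flat views, listener notifications and ioeventfds are only rebuilt when
 * the outermost transaction commits.
 */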
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

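/* Characters that are special in QOM path components ('/', '[', ']', '\')
 * are escaped as a four-byte "\xNN" hex sequence.
 */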
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                                const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

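/* Instance init for TYPE_MEMORY_REGION: set the defaults and expose
 * read-only QOM properties for the container, address, priority and size.
 */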
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

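/* Fallback handlers for accesses that hit no memory region: report an
 * unassigned access to the current CPU (if any); reads return zero and
 * writes are ignored.
 */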
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

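/* RAM device regions are backed by host memory, but accesses must go through
 * these read/write handlers so that they are performed with the exact width
 * the guest requested rather than as an arbitrary memcpy.
 */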
Alex Williamson4a2e2422016-10-31 09:53:03 -06001283static uint64_t memory_region_ram_device_read(void *opaque,
1284 hwaddr addr, unsigned size)
1285{
1286 MemoryRegion *mr = opaque;
1287 uint64_t data = (uint64_t)~0;
1288
1289 switch (size) {
1290 case 1:
1291 data = *(uint8_t *)(mr->ram_block->host + addr);
1292 break;
1293 case 2:
1294 data = *(uint16_t *)(mr->ram_block->host + addr);
1295 break;
1296 case 4:
1297 data = *(uint32_t *)(mr->ram_block->host + addr);
1298 break;
1299 case 8:
1300 data = *(uint64_t *)(mr->ram_block->host + addr);
1301 break;
1302 }
1303
1304 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1305
1306 return data;
1307}
1308
1309static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1310 uint64_t data, unsigned size)
1311{
1312 MemoryRegion *mr = opaque;
1313
1314 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1315
1316 switch (size) {
1317 case 1:
1318 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1319 break;
1320 case 2:
1321 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1322 break;
1323 case 4:
1324 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1325 break;
1326 case 8:
1327 *(uint64_t *)(mr->ram_block->host + addr) = data;
1328 break;
1329 }
1330}
1331
1332static const MemoryRegionOps ram_device_mem_ops = {
1333 .read = memory_region_ram_device_read,
1334 .write = memory_region_ram_device_write,
Yongji Xiec99a29e2017-02-27 12:52:44 +08001335 .endianness = DEVICE_HOST_ENDIAN,
Alex Williamson4a2e2422016-10-31 09:53:03 -06001336 .valid = {
1337 .min_access_size = 1,
1338 .max_access_size = 8,
1339 .unaligned = true,
1340 },
1341 .impl = {
1342 .min_access_size = 1,
1343 .max_access_size = 8,
1344 .unaligned = true,
1345 },
1346};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}
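
/*
 * Illustrative sketch (not part of the original file): a device whose
 * registers only decode aligned 2- and 4-byte accesses could express
 * that through .valid, and the check above enforces it.  All names and
 * sizes here are invented for the example.
 */
static uint64_t example_strict_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;
}

static void example_strict_write(void *opaque, hwaddr addr, uint64_t val,
                                 unsigned size)
{
}

static const MemoryRegionOps example_strict_ops G_GNUC_UNUSED = {
    .read = example_strict_read,
    .write = example_strict_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 2,
        .max_access_size = 4,
        .unaligned = false,    /* trips the (addr & (size - 1)) test above */
    },
};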

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}
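
/*
 * Illustrative sketch (not part of the original file): a caller with no
 * special transaction attributes could issue a 4-byte read like this and
 * check the MemTxResult before trusting the value.
 */
static G_GNUC_UNUSED uint64_t example_read_u32(MemoryRegion *mr, hwaddr addr)
{
    uint64_t val;
    MemTxResult r;

    r = memory_region_dispatch_read(mr, addr, &val, 4,
                                    MEMTXATTRS_UNSPECIFIED);
    return r == MEMTX_OK ? val : (uint64_t)-1;
}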

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
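
/*
 * Illustrative sketch (not part of the original file): a device realize
 * function would typically pair memory_region_init_io() with its ops
 * table and then map the region; the name and 4 KiB size are assumptions.
 */
static G_GNUC_UNUSED void example_init_mmio(MemoryRegion *mr, Object *owner,
                                            const MemoryRegionOps *dev_ops,
                                            void *dev)
{
    memory_region_init_io(mr, owner, dev_ops, dev, "example-dev-mmio",
                          0x1000);
}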

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
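
/*
 * Illustrative sketch (not part of the original file): boards commonly
 * allocate RAM and map it at a fixed guest-physical base; the name, the
 * 128 MiB size and the base address are assumptions.
 */
static G_GNUC_UNUSED void example_map_ram(MemoryRegion *sysmem, Object *owner)
{
    MemoryRegion *ram = g_new0(MemoryRegion, 1);

    memory_region_init_ram_nomigrate(ram, owner, "example.ram",
                                     128 * 1024 * 1024, &error_fatal);
    memory_region_add_subregion(sysmem, 0x40000000, ram);
}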

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
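
/*
 * Illustrative sketch (not part of the original file): file-backed RAM is
 * how hugetlbfs or DAX backends get wired up; the size, path and share
 * flag are assumptions.
 */
static G_GNUC_UNUSED void example_map_file_ram(MemoryRegion *mr, Object *owner,
                                               Error **errp)
{
    memory_region_init_ram_from_file(mr, owner, "example.hugepage-ram",
                                     64 * 1024 * 1024, 0, true,
                                     "/dev/hugepages/example", errp);
}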
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
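
/*
 * Illustrative sketch (not part of the original file): the classic use of
 * an alias is exposing a slice of one region at another address, as PC
 * chipsets do for the 0xC0000-0xDFFFF window; the addresses and the
 * priority are assumptions.
 */
static G_GNUC_UNUSED void example_map_alias(MemoryRegion *sysmem,
                                            MemoryRegion *ram)
{
    MemoryRegion *alias = g_new0(MemoryRegion, 1);

    memory_region_init_alias(alias, NULL, "example.pam", ram,
                             0xc0000, 0x20000);
    memory_region_add_subregion_overlap(sysmem, 0xc0000, alias, 1);
}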

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}
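
/*
 * Illustrative sketch (not part of the original file): code that stashes
 * a MemoryRegion pointer beyond the current context should bracket the
 * pointer's lifetime with a ref/unref pair, e.g.:
 */
static G_GNUC_UNUSED void example_stash_region(MemoryRegion **slot,
                                               MemoryRegion *mr)
{
    memory_region_ref(mr);          /* keeps mr->owner, hence mr, alive */
    if (*slot) {
        memory_region_unref(*slot); /* drop the previously stashed region */
    }
    *slot = mr;
}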

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}
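
/*
 * Illustrative sketch (not part of the original file): a VFIO-like
 * consumer fills in an IOMMUNotifier before registering it; the callback
 * body and the full-range registration are assumptions.
 */
static void example_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    /* react to the mapping change described by iotlb */
}

static G_GNUC_UNUSED void example_register_notifier(MemoryRegion *mr,
                                                    IOMMUNotifier *n)
{
    iommu_notifier_init(n, example_iommu_map_notify,
                        IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP,
                        0, HWADDR_MAX);
    memory_region_register_iommu_notifier(mr, n);
}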

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if it does not overlap with the
     * registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        memory_region_notify_one(iommu_notifier, &entry);
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
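
/*
 * Illustrative sketch (not part of the original file): display code
 * typically snapshots the VGA dirty bits once per frame and then tests
 * sub-ranges; the 4 KiB stride is an assumption.
 */
static G_GNUC_UNUSED void example_scan_dirty(MemoryRegion *fb, hwaddr fb_size)
{
    DirtyBitmapSnapshot *snap;
    hwaddr off;

    snap = memory_region_snapshot_and_clear_dirty(fb, 0, fb_size,
                                                  DIRTY_MEMORY_VGA);
    for (off = 0; off < fb_size; off += 4096) {
        if (memory_region_snapshot_get_dirty(fb, snap, off, 4096)) {
            /* redraw the page at off */
        }
    }
    g_free(snap);
}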

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .fv = view,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
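
/*
 * Illustrative sketch (not part of the original file): virtio-style
 * doorbells register an eventfd so that a guest write of a known value
 * to a known offset is routed straight to an EventNotifier; the 2-byte
 * size, the 0x10 offset and the matched value are assumptions.
 */
static G_GNUC_UNUSED void example_add_doorbell(MemoryRegion *mr,
                                               EventNotifier *e)
{
    memory_region_add_eventfd(mr, 0x10, 2, true, 0 /* queue index */, e);
}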

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
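
/*
 * Illustrative sketch (not part of the original file): an overlapping
 * subregion with higher priority shadows whatever it covers, which is how
 * a ROM can temporarily hide RAM mapped at the same address; the base
 * address is an assumption.
 */
static G_GNUC_UNUSED void example_shadow_rom(MemoryRegion *sysmem,
                                             MemoryRegion *ram,
                                             MemoryRegion *rom)
{
    memory_region_add_subregion(sysmem, 0xfffc0000, ram);           /* prio 0 */
    memory_region_add_subregion_overlap(sysmem, 0xfffc0000, rom, 1);
}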

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
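
/*
 * Illustrative sketch (not part of the original file): querying what
 * backs a given address; the returned section holds a reference that the
 * caller must drop.
 */
static G_GNUC_UNUSED bool example_backed_by_ram(MemoryRegion *container,
                                                hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(container, addr, 4);
    bool is_ram = sec.mr && memory_region_is_ram(sec.mr);

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return is_ram;
}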
2478
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002479bool memory_region_present(MemoryRegion *container, hwaddr addr)
2480{
2481 MemoryRegion *mr;
2482
2483 rcu_read_lock();
2484 mr = memory_region_find_rcu(container, addr, 1).mr;
2485 rcu_read_unlock();
2486 return mr && mr != container;
2487}
2488
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002489void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002490{
Paolo Bonzini3ebb1812018-02-06 17:55:27 +01002491 memory_region_sync_dirty_bitmap(NULL);
Avi Kivity7664e802011-12-11 14:47:25 +02002492}
2493
Jay Zhou19310762017-07-28 18:28:53 +08002494static VMChangeStateEntry *vmstate_change;
2495
Avi Kivity7664e802011-12-11 14:47:25 +02002496void memory_global_dirty_log_start(void)
2497{
Jay Zhou19310762017-07-28 18:28:53 +08002498 if (vmstate_change) {
2499 qemu_del_vm_change_state_handler(vmstate_change);
2500 vmstate_change = NULL;
2501 }
2502
Avi Kivity7664e802011-12-11 14:47:25 +02002503 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002504
Avi Kivity7376e582012-02-08 21:05:17 +02002505 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002506
2507 /* Refresh DIRTY_LOG_MIGRATION bit. */
2508 memory_region_transaction_begin();
2509 memory_region_update_pending = true;
2510 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002511}
2512
Jay Zhou19310762017-07-28 18:28:53 +08002513static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002514{
Avi Kivity7664e802011-12-11 14:47:25 +02002515 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002516
2517 /* Refresh DIRTY_LOG_MIGRATION bit. */
2518 memory_region_transaction_begin();
2519 memory_region_update_pending = true;
2520 memory_region_transaction_commit();
2521
Avi Kivity7376e582012-02-08 21:05:17 +02002522 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002523}
2524
Jay Zhou19310762017-07-28 18:28:53 +08002525static void memory_vm_change_state_handler(void *opaque, int running,
2526 RunState state)
2527{
2528 if (running) {
2529 memory_global_dirty_log_do_stop();
2530
2531 if (vmstate_change) {
2532 qemu_del_vm_change_state_handler(vmstate_change);
2533 vmstate_change = NULL;
2534 }
2535 }
2536}
2537
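/*
 * Stop global dirty logging.  If the VM is not currently running, the real
 * work is deferred: a vm-change-state handler is installed and
 * memory_global_dirty_log_do_stop() only runs once the VM resumes,
 * presumably to keep the teardown off the path where the VM is paused
 * (e.g. at the end of migration).
 */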
2538void memory_global_dirty_log_stop(void)
2539{
2540 if (!runstate_is_running()) {
2541 if (vmstate_change) {
2542 return;
2543 }
2544 vmstate_change = qemu_add_vm_change_state_handler(
2545 memory_vm_change_state_handler, NULL);
2546 return;
2547 }
2548
2549 memory_global_dirty_log_do_stop();
2550}
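
/*
 * Typical lifecycle, roughly as RAM migration is expected to use it
 * (illustrative sketch, not a verbatim copy of the migration code):
 *
 *     memory_global_dirty_log_start();   // listeners see log_global_start
 *     ... guest runs, dirty pages are tracked ...
 *     memory_global_dirty_log_sync();    // pull dirty bits from listeners
 *     ... copy the pages reported dirty ...
 *     memory_global_dirty_log_stop();    // possibly deferred, see above
 */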
2551
Avi Kivity7664e802011-12-11 14:47:25 +02002552static void listener_add_address_space(MemoryListener *listener,
2553 AddressSpace *as)
2554{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002555 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002556 FlatRange *fr;
2557
Paolo Bonzini680a4782015-11-02 09:23:52 +01002558 if (listener->begin) {
2559 listener->begin(listener);
2560 }
Avi Kivity7664e802011-12-11 14:47:25 +02002561 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002562 if (listener->log_global_start) {
2563 listener->log_global_start(listener);
2564 }
Avi Kivity7664e802011-12-11 14:47:25 +02002565 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002566
Paolo Bonzini856d7242013-05-06 11:57:21 +02002567 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002568 FOR_EACH_FLAT_RANGE(fr, view) {
David Hildenbrand279836f2017-10-16 16:43:02 +02002569 MemoryRegionSection section = section_from_flat_range(fr, view);
2570
Avi Kivity975aefe2012-10-02 16:39:57 +02002571 if (listener->region_add) {
2572 listener->region_add(listener, &section);
2573 }
David Hildenbrandae990e62017-10-16 16:42:56 +02002574 if (fr->dirty_log_mask && listener->log_start) {
2575 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2576 }
Avi Kivity7664e802011-12-11 14:47:25 +02002577 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002578 if (listener->commit) {
2579 listener->commit(listener);
2580 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002581 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002582}
2583
Peter Xud25836c2018-01-22 14:02:44 +08002584static void listener_del_address_space(MemoryListener *listener,
2585 AddressSpace *as)
2586{
2587 FlatView *view;
2588 FlatRange *fr;
2589
2590 if (listener->begin) {
2591 listener->begin(listener);
2592 }
2593 view = address_space_get_flatview(as);
2594 FOR_EACH_FLAT_RANGE(fr, view) {
2595 MemoryRegionSection section = section_from_flat_range(fr, view);
2596
2597 if (fr->dirty_log_mask && listener->log_stop) {
2598 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2599 }
2600 if (listener->region_del) {
2601 listener->region_del(listener, &section);
2602 }
2603 }
2604 if (listener->commit) {
2605 listener->commit(listener);
2606 }
2607 flatview_unref(view);
2608}
2609
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002610void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002611{
Avi Kivity72e22d22012-02-08 15:05:50 +02002612 MemoryListener *other = NULL;
2613
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002614 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002615 if (QTAILQ_EMPTY(&memory_listeners)
2616 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2617 memory_listeners)->priority) {
2618 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2619 } else {
2620 QTAILQ_FOREACH(other, &memory_listeners, link) {
2621 if (listener->priority < other->priority) {
2622 break;
2623 }
2624 }
2625 QTAILQ_INSERT_BEFORE(other, listener, link);
2626 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002627
Paolo Bonzini9a546352016-09-22 16:23:06 +02002628 if (QTAILQ_EMPTY(&as->listeners)
2629 || listener->priority >= QTAILQ_LAST(&as->listeners,
2630 memory_listeners)->priority) {
2631 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2632 } else {
2633 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2634 if (listener->priority < other->priority) {
2635 break;
2636 }
2637 }
2638 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2639 }
2640
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002641 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002642}
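
/*
 * Illustrative sketch of registering and unregistering a listener on the
 * system address space; the callback and priority value are hypothetical,
 * address_space_memory is the usual global:
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         ... react to a newly mapped section ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *     ...
 *     memory_listener_unregister(&my_listener);
 */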
2643
2644void memory_listener_unregister(MemoryListener *listener)
2645{
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002646 if (!listener->address_space) {
2647 return;
2648 }
2649
Peter Xud25836c2018-01-22 14:02:44 +08002650 listener_del_address_space(listener, listener->address_space);
Avi Kivity72e22d22012-02-08 15:05:50 +02002651 QTAILQ_REMOVE(&memory_listeners, listener, link);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002652 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002653 listener->address_space = NULL;
Avi Kivity86e775c2011-12-15 16:24:49 +02002654}
Avi Kivitye2177952011-12-08 15:00:18 +02002655
KONRAD Fredericc9356742016-10-19 15:06:49 +02002656bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2657{
2658 void *host;
2659 unsigned size = 0;
2660 unsigned offset = 0;
2661 Object *new_interface;
2662
2663 if (!mr || !mr->ops->request_ptr) {
2664 return false;
2665 }
2666
2667 /*
2668     * Avoid an update if the request_ptr callback invokes
2669     * memory_region_invalidate_mmio_ptr(), which is likely when we
2670     * use a cache.
2671 */
2672 memory_region_transaction_begin();
2673
2674 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2675
2676 if (!host || !size) {
2677 memory_region_transaction_commit();
2678 return false;
2679 }
2680
2681 new_interface = object_new("mmio_interface");
2682 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
2683 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
2684 qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
2685 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
2686 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
2687 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
2688
2689 memory_region_transaction_commit();
2690 return true;
2691}
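
/*
 * Illustrative sketch of a device offering the request_ptr hook consumed
 * above.  The callback name and body are hypothetical; the signature is
 * inferred from the call site in memory_region_request_mmio_ptr():
 *
 *     static void *my_request_ptr(void *opaque, hwaddr addr,
 *                                 unsigned *size, unsigned *offset)
 *     {
 *         ... return a host pointer backing this MMIO window and fill in
 *             *size and *offset, or return NULL to decline ...
 *     }
 *
 *     static const MemoryRegionOps my_ops = {
 *         ...
 *         .request_ptr = my_request_ptr,
 *     };
 */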
2692
2693typedef struct MMIOPtrInvalidate {
2694 MemoryRegion *mr;
2695 hwaddr offset;
2696 unsigned size;
2697 int busy;
2698 int allocated;
2699} MMIOPtrInvalidate;
2700
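/*
 * Invalidation requests are normally taken from a small fixed pool so that
 * the invalidate path does not have to allocate; when every slot is busy, a
 * request is heap-allocated instead and freed once the asynchronous CPU work
 * in memory_region_do_invalidate_mmio_ptr() has run.
 */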
2701#define MAX_MMIO_INVALIDATE 10
2702static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
2703
2704static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
2705 run_on_cpu_data data)
2706{
2707 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
2708 MemoryRegion *mr = invalidate_data->mr;
2709 hwaddr offset = invalidate_data->offset;
2710 unsigned size = invalidate_data->size;
2711 MemoryRegionSection section = memory_region_find(mr, offset, size);
2712
2713 qemu_mutex_lock_iothread();
2714
2715 /* Reset dirty so this doesn't happen later. */
2716 cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
2717
2718 if (section.mr != mr) {
2719        /* memory_region_find adds a ref on section.mr */
2720 memory_region_unref(section.mr);
2721 if (MMIO_INTERFACE(section.mr->owner)) {
2722            /* We found the interface, just drop it. */
2723 object_property_set_bool(section.mr->owner, false, "realized",
2724 NULL);
2725 object_unref(section.mr->owner);
2726 object_unparent(section.mr->owner);
2727 }
2728 }
2729
2730 qemu_mutex_unlock_iothread();
2731
2732 if (invalidate_data->allocated) {
2733 g_free(invalidate_data);
2734 } else {
2735 invalidate_data->busy = 0;
2736 }
2737}
2738
2739void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
2740 unsigned size)
2741{
2742 size_t i;
2743 MMIOPtrInvalidate *invalidate_data = NULL;
2744
2745 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
2746 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
2747 invalidate_data = &mmio_ptr_invalidate_list[i];
2748 break;
2749 }
2750 }
2751
2752 if (!invalidate_data) {
2753 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
2754 invalidate_data->allocated = 1;
2755 }
2756
2757 invalidate_data->mr = mr;
2758 invalidate_data->offset = offset;
2759 invalidate_data->size = size;
2760
2761 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
2762 RUN_ON_CPU_HOST_PTR(invalidate_data));
2763}
2764
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002765void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002766{
Paolo Bonziniac951902015-02-11 15:21:04 +01002767 memory_region_ref(root);
Avi Kivity8786db72012-10-02 13:53:41 +02002768 as->root = root;
Alexey Kardashevskiy67ace392017-09-21 18:51:05 +10002769 as->current_map = NULL;
Avi Kivity4c19eb72012-10-30 13:47:44 +02002770 as->ioeventfd_nb = 0;
2771 as->ioeventfds = NULL;
Paolo Bonzini9a546352016-09-22 16:23:06 +02002772 QTAILQ_INIT(&as->listeners);
Avi Kivity0d673e32012-10-02 15:28:50 +02002773 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002774 as->name = g_strdup(name ? name : "anonymous");
Alexey Kardashevskiy202fc012017-09-21 18:51:09 +10002775 address_space_update_topology(as);
2776 address_space_update_ioeventfds(as);
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002777}
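
/*
 * Illustrative sketch (names and the owner object are hypothetical): build
 * an address space around a fresh root container and tear it down again:
 *
 *     MemoryRegion *root = g_new0(MemoryRegion, 1);
 *     AddressSpace *as = g_new0(AddressSpace, 1);
 *
 *     memory_region_init(root, owner, "my-root", UINT64_MAX);
 *     address_space_init(as, root, "my-as");
 *     ...
 *     address_space_destroy(as);
 */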
Avi Kivity658b2222011-07-26 14:26:08 +03002778
Paolo Bonzini374f2982013-05-17 12:37:03 +02002779static void do_address_space_destroy(AddressSpace *as)
Avi Kivity83f3c252012-10-07 12:59:55 +02002780{
Paolo Bonzini9a546352016-09-22 16:23:06 +02002781 assert(QTAILQ_EMPTY(&as->listeners));
David Gibson078c44f2014-05-30 12:59:00 -06002782
Paolo Bonzini856d7242013-05-06 11:57:21 +02002783 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002784 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002785 g_free(as->ioeventfds);
Paolo Bonziniac951902015-02-11 15:21:04 +01002786 memory_region_unref(as->root);
Avi Kivity83f3c252012-10-07 12:59:55 +02002787}
2788
Paolo Bonzini374f2982013-05-17 12:37:03 +02002789void address_space_destroy(AddressSpace *as)
2790{
Paolo Bonziniac951902015-02-11 15:21:04 +01002791 MemoryRegion *root = as->root;
2792
Paolo Bonzini374f2982013-05-17 12:37:03 +02002793 /* Flush out anything from MemoryListeners listening in on this */
2794 memory_region_transaction_begin();
2795 as->root = NULL;
2796 memory_region_transaction_commit();
2797 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2798
2799    /* At this point, as->current_map is a dummy entry that the guest
2800     * should never use.  Wait for the old value to expire before
2801     * freeing the data.
2802 */
Paolo Bonziniac951902015-02-11 15:21:04 +01002803 as->root = root;
Paolo Bonzini374f2982013-05-17 12:37:03 +02002804 call_rcu(as, do_address_space_destroy, rcu);
2805}
2806
Peter Xu4e831902017-01-16 16:40:04 +08002807static const char *memory_region_type(MemoryRegion *mr)
2808{
2809 if (memory_region_is_ram_device(mr)) {
2810 return "ramd";
2811 } else if (memory_region_is_romd(mr)) {
2812 return "romd";
2813 } else if (memory_region_is_rom(mr)) {
2814 return "rom";
2815 } else if (memory_region_is_ram(mr)) {
2816 return "ram";
2817 } else {
2818 return "i/o";
2819 }
2820}
2821
Blue Swirl314e2982011-09-11 20:22:05 +00002822typedef struct MemoryRegionList MemoryRegionList;
2823
2824struct MemoryRegionList {
2825 const MemoryRegion *mr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002826 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
Blue Swirl314e2982011-09-11 20:22:05 +00002827};
2828
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002829typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
Blue Swirl314e2982011-09-11 20:22:05 +00002830
Peter Xu4e831902017-01-16 16:40:04 +08002831#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2832 int128_sub((size), int128_one())) : 0)
2833#define MTREE_INDENT " "
2834
Blue Swirl314e2982011-09-11 20:22:05 +00002835static void mtree_print_mr(fprintf_function mon_printf, void *f,
2836 const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02002837 hwaddr base,
Jan Kiszka9479c572011-09-27 15:00:41 +02002838 MemoryRegionListHead *alias_print_queue)
Blue Swirl314e2982011-09-11 20:22:05 +00002839{
Jan Kiszka9479c572011-09-27 15:00:41 +02002840 MemoryRegionList *new_ml, *ml, *next_ml;
2841 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00002842 const MemoryRegion *submr;
2843 unsigned int i;
Peter Xub31f8412017-03-14 20:56:27 +08002844 hwaddr cur_start, cur_end;
Blue Swirl314e2982011-09-11 20:22:05 +00002845
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002846 if (!mr) {
Blue Swirl314e2982011-09-11 20:22:05 +00002847 return;
2848 }
2849
2850 for (i = 0; i < level; i++) {
Peter Xu4e831902017-01-16 16:40:04 +08002851 mon_printf(f, MTREE_INDENT);
Blue Swirl314e2982011-09-11 20:22:05 +00002852 }
2853
Peter Xub31f8412017-03-14 20:56:27 +08002854 cur_start = base + mr->addr;
2855 cur_end = cur_start + MR_SIZE(mr->size);
2856
2857 /*
2858     * Try to detect overflow of the memory region.  This should never
2859     * happen normally; when it does, print a marker to warn the user
2860     * observing the output.
2861 */
2862 if (cur_start < base || cur_end < cur_start) {
2863 mon_printf(f, "[DETECTED OVERFLOW!] ");
2864 }
2865
Blue Swirl314e2982011-09-11 20:22:05 +00002866 if (mr->alias) {
2867 MemoryRegionList *ml;
2868 bool found = false;
2869
2870 /* check if the alias is already in the queue */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002871 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
Paolo Bonzinif54bb152013-12-11 12:51:46 +01002872 if (ml->mr == mr->alias) {
Blue Swirl314e2982011-09-11 20:22:05 +00002873 found = true;
2874 }
2875 }
2876
2877 if (!found) {
2878 ml = g_new(MemoryRegionList, 1);
2879 ml->mr = mr->alias;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002880 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
Blue Swirl314e2982011-09-11 20:22:05 +00002881 }
Jan Kiszka4896d742012-02-04 16:25:42 +01002882 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
Peter Xu4e831902017-01-16 16:40:04 +08002883 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002884 "-" TARGET_FMT_plx "%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002885 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002886 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002887 memory_region_type((MemoryRegion *)mr),
Peter Crosthwaite3fb18b42014-08-14 23:55:36 -07002888 memory_region_name(mr),
2889 memory_region_name(mr->alias),
Blue Swirl314e2982011-09-11 20:22:05 +00002890 mr->alias_offset,
Peter Xu4e831902017-01-16 16:40:04 +08002891 mr->alias_offset + MR_SIZE(mr->size),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002892 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002893 } else {
Jan Kiszka4896d742012-02-04 16:25:42 +01002894 mon_printf(f,
Peter Xu4e831902017-01-16 16:40:04 +08002895 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002896 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002897 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002898 memory_region_type((MemoryRegion *)mr),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002899 memory_region_name(mr),
2900 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002901 }
Jan Kiszka9479c572011-09-27 15:00:41 +02002902
2903 QTAILQ_INIT(&submr_print_queue);
2904
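    /* Keep subregions sorted by ascending address; on ties, higher priority
     * prints first. */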
Blue Swirl314e2982011-09-11 20:22:05 +00002905 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002906 new_ml = g_new(MemoryRegionList, 1);
2907 new_ml->mr = submr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002908 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002909 if (new_ml->mr->addr < ml->mr->addr ||
2910 (new_ml->mr->addr == ml->mr->addr &&
2911 new_ml->mr->priority > ml->mr->priority)) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002912 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002913 new_ml = NULL;
2914 break;
2915 }
2916 }
2917 if (new_ml) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002918 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002919 }
2920 }
2921
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002922 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Peter Xub31f8412017-03-14 20:56:27 +08002923 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
Jan Kiszka9479c572011-09-27 15:00:41 +02002924 alias_print_queue);
2925 }
2926
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002927 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002928 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002929 }
2930}
2931
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002932struct FlatViewInfo {
2933 fprintf_function mon_printf;
2934 void *f;
2935 int counter;
2936 bool dispatch_tree;
2937};
2938
2939static void mtree_print_flatview(gpointer key, gpointer value,
2940 gpointer user_data)
Peter Xu57bb40c2017-01-16 16:40:05 +08002941{
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002942 FlatView *view = key;
2943 GArray *fv_address_spaces = value;
2944 struct FlatViewInfo *fvi = user_data;
2945 fprintf_function p = fvi->mon_printf;
2946 void *f = fvi->f;
Peter Xu57bb40c2017-01-16 16:40:05 +08002947 FlatRange *range = &view->ranges[0];
2948 MemoryRegion *mr;
2949 int n = view->nr;
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002950 int i;
2951 AddressSpace *as;
2952
2953 p(f, "FlatView #%d\n", fvi->counter);
2954 ++fvi->counter;
2955
2956 for (i = 0; i < fv_address_spaces->len; ++i) {
2957 as = g_array_index(fv_address_spaces, AddressSpace*, i);
2958 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
2959 if (as->root->alias) {
2960 p(f, ", alias %s", memory_region_name(as->root->alias));
2961 }
2962 p(f, "\n");
2963 }
2964
2965 p(f, " Root memory region: %s\n",
2966 view->root ? memory_region_name(view->root) : "(none)");
Peter Xu57bb40c2017-01-16 16:40:05 +08002967
2968 if (n <= 0) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002969 p(f, MTREE_INDENT "No rendered FlatView\n\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08002970 return;
2971 }
2972
2973 while (n--) {
2974 mr = range->mr;
Paolo Bonzini377a07a2017-03-02 22:49:41 +01002975 if (range->offset_in_region) {
2976 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2977 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
2978 int128_get64(range->addr.start),
2979 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2980 mr->priority,
2981 range->readonly ? "rom" : memory_region_type(mr),
2982 memory_region_name(mr),
2983 range->offset_in_region);
2984 } else {
2985 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2986 TARGET_FMT_plx " (prio %d, %s): %s\n",
2987 int128_get64(range->addr.start),
2988 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2989 mr->priority,
2990 range->readonly ? "rom" : memory_region_type(mr),
2991 memory_region_name(mr));
2992 }
Peter Xu57bb40c2017-01-16 16:40:05 +08002993 range++;
2994 }
2995
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10002996#if !defined(CONFIG_USER_ONLY)
2997 if (fvi->dispatch_tree && view->root) {
2998 mtree_print_dispatch(p, f, view->dispatch, view->root);
2999 }
3000#endif
3001
3002 p(f, "\n");
Peter Xu57bb40c2017-01-16 16:40:05 +08003003}
3004
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003005static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3006 gpointer user_data)
3007{
3008 FlatView *view = key;
3009 GArray *fv_address_spaces = value;
3010
3011 g_array_unref(fv_address_spaces);
3012 flatview_unref(view);
3013
3014 return true;
3015}
3016
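/*
 * Dump the memory-region hierarchy, or the rendered FlatViews when
 * flatview is true; this is the backend behind the monitor's
 * "info mtree" command.
 */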
3017void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
3018 bool dispatch_tree)
Blue Swirl314e2982011-09-11 20:22:05 +00003019{
3020 MemoryRegionListHead ml_head;
3021 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02003022 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00003023
Peter Xu57bb40c2017-01-16 16:40:05 +08003024 if (flatview) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003025 FlatView *view;
3026 struct FlatViewInfo fvi = {
3027 .mon_printf = mon_printf,
3028 .f = f,
3029 .counter = 0,
3030 .dispatch_tree = dispatch_tree
3031 };
3032 GArray *fv_address_spaces;
3033 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3034
3035 /* Gather all FVs in one table */
Peter Xu57bb40c2017-01-16 16:40:05 +08003036 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003037 view = address_space_get_flatview(as);
3038
3039 fv_address_spaces = g_hash_table_lookup(views, view);
3040 if (!fv_address_spaces) {
3041 fv_address_spaces = g_array_new(false, false, sizeof(as));
3042 g_hash_table_insert(views, view, fv_address_spaces);
3043 }
3044
3045 g_array_append_val(fv_address_spaces, as);
Peter Xu57bb40c2017-01-16 16:40:05 +08003046 }
Alexey Kardashevskiy5e8fd942017-09-21 18:51:06 +10003047
3048 /* Print */
3049 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3050
3051 /* Free */
3052 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3053 g_hash_table_unref(views);
3054
Peter Xu57bb40c2017-01-16 16:40:05 +08003055 return;
3056 }
3057
Blue Swirl314e2982011-09-11 20:22:05 +00003058 QTAILQ_INIT(&ml_head);
3059
Avi Kivity0d673e32012-10-02 15:28:50 +02003060 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02003061 mon_printf(f, "address-space: %s\n", as->name);
3062 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
3063 mon_printf(f, "\n");
Blue Swirlb9f9be82012-03-10 16:58:35 +00003064 }
3065
Blue Swirl314e2982011-09-11 20:22:05 +00003066 /* print aliased regions */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003067 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02003068 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
3069 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
3070 mon_printf(f, "\n");
Blue Swirl314e2982011-09-11 20:22:05 +00003071 }
3072
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02003073 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02003074 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00003075 }
Blue Swirl314e2982011-09-11 20:22:05 +00003076}
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003077
Peter Maydellb08199c2017-07-07 15:42:51 +01003078void memory_region_init_ram(MemoryRegion *mr,
3079 struct Object *owner,
3080 const char *name,
3081 uint64_t size,
3082 Error **errp)
3083{
3084 DeviceState *owner_dev;
3085 Error *err = NULL;
3086
3087 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3088 if (err) {
3089 error_propagate(errp, err);
3090 return;
3091 }
3092 /* This will assert if owner is neither NULL nor a DeviceState.
3093 * We only want the owner here for the purposes of defining a
3094 * unique name for migration. TODO: Ideally we should implement
3095 * a naming scheme for Objects which are not DeviceStates, in
3096 * which case we can relax this restriction.
3097 */
3098 owner_dev = DEVICE(owner);
3099 vmstate_register_ram(mr, owner_dev);
3100}
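
/*
 * Illustrative sketch (device state, size and offset are hypothetical):
 * allocate migratable RAM for a device and map it into system memory:
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            0x4000, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000, &s->ram);
 */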
3101
3102void memory_region_init_rom(MemoryRegion *mr,
3103 struct Object *owner,
3104 const char *name,
3105 uint64_t size,
3106 Error **errp)
3107{
3108 DeviceState *owner_dev;
3109 Error *err = NULL;
3110
3111 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3112 if (err) {
3113 error_propagate(errp, err);
3114 return;
3115 }
3116 /* This will assert if owner is neither NULL nor a DeviceState.
3117 * We only want the owner here for the purposes of defining a
3118 * unique name for migration. TODO: Ideally we should implement
3119 * a naming scheme for Objects which are not DeviceStates, in
3120 * which case we can relax this restriction.
3121 */
3122 owner_dev = DEVICE(owner);
3123 vmstate_register_ram(mr, owner_dev);
3124}
3125
3126void memory_region_init_rom_device(MemoryRegion *mr,
3127 struct Object *owner,
3128 const MemoryRegionOps *ops,
3129 void *opaque,
3130 const char *name,
3131 uint64_t size,
3132 Error **errp)
3133{
3134 DeviceState *owner_dev;
3135 Error *err = NULL;
3136
3137 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3138 name, size, &err);
3139 if (err) {
3140 error_propagate(errp, err);
3141 return;
3142 }
3143 /* This will assert if owner is neither NULL nor a DeviceState.
3144 * We only want the owner here for the purposes of defining a
3145 * unique name for migration. TODO: Ideally we should implement
3146 * a naming scheme for Objects which are not DeviceStates, in
3147 * which case we can relax this restriction.
3148 */
3149 owner_dev = DEVICE(owner);
3150 vmstate_register_ram(mr, owner_dev);
3151}
3152
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003153static const TypeInfo memory_region_info = {
3154 .parent = TYPE_OBJECT,
3155 .name = TYPE_MEMORY_REGION,
3156 .instance_size = sizeof(MemoryRegion),
3157 .instance_init = memory_region_initfn,
3158 .instance_finalize = memory_region_finalize,
3159};
3160
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003161static const TypeInfo iommu_memory_region_info = {
3162 .parent = TYPE_MEMORY_REGION,
3163 .name = TYPE_IOMMU_MEMORY_REGION,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003164 .class_size = sizeof(IOMMUMemoryRegionClass),
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003165 .instance_size = sizeof(IOMMUMemoryRegion),
3166 .instance_init = iommu_memory_region_initfn,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10003167 .abstract = true,
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003168};
3169
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003170static void memory_register_types(void)
3171{
3172 type_register_static(&memory_region_info);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10003173 type_register_static(&iommu_memory_region_info);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07003174}
3175
3176type_init(memory_register_types)