/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

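/* Call a listener callback on every registered MemoryListener, walking the
 * global list in registration order (Forward) or in reverse (Reverse), and
 * skipping listeners that do not implement the callback.
 */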
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

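/* Strict ordering of ioeventfds by (address, size, match_data, data, notifier
 * pointer); used to keep each address space's ioeventfd array sorted and to
 * diff old and new sets in address_space_add_del_ioeventfds().
 */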
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

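/* Two FlatRanges can be merged when they are back-to-back in the address
 * space, refer to contiguous offsets of the same MemoryRegion, and carry
 * identical dirty-logging, ROM-device and read-only attributes.
 */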
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

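/* MMIO accessors: each function below performs a single device-sized
 * sub-access through the region's callbacks and folds the result into *value
 * using the given shift and mask, emitting the subpage/TB/ops tracepoints
 * along the way.  access_with_adjusted_size() stitches these sub-accesses
 * into a full-sized access.
 */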
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

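/* Split an access into pieces the device can handle: clamp the access size to
 * the [access_size_min, access_size_max] range, issue one sub-access per
 * chunk (most-significant chunk first for big-endian devices), and OR the
 * per-chunk MemTxResults together.
 */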
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access)(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            uint64_t *value,
                                                            unsigned size,
                                                            unsigned shift,
                                                            uint64_t mask,
                                                            MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

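/* Walk up the container chain to the root region and return the address space
 * that has it as its root, or NULL if the region is not mapped into any
 * address space.
 */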
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

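/* Take a reference to the address space's current FlatView under RCU so the
 * caller can keep using it after the RCU read lock is dropped.
 */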
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

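/* One pass of the topology update: with adding=false, emit region_del for
 * ranges that disappear or change; with adding=true, emit region_add for new
 * ranges and region_nop/log_start/log_stop for ranges that survive.
 */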
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}


static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

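/* Transactions batch memory map changes: begin() bumps a depth counter, and
 * only the outermost commit() regenerates the flat views, notifies listeners
 * and refreshes ioeventfds.
 */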
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

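/* MemoryRegion names become QOM child property names, so characters that are
 * special in QOM paths are escaped as a four-byte "\xNN" sequence.
 */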
static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
       return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

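/* ram_device regions are accessed with explicit host loads and stores of the
 * requested width rather than memcpy, and are declared DEVICE_HOST_ENDIAN.
 */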
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

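/* Dispatch a read through whichever callback flavour the region implements:
 * .read, .read_with_attrs, or the legacy old_mmio table.
 */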
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                    hwaddr addr,
                                                    uint64_t data,
                                                    unsigned size,
                                                    MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

Avi Kivity093bc2c2011-07-26 14:26:01 +03001355void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001356 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001357 const MemoryRegionOps *ops,
1358 void *opaque,
1359 const char *name,
1360 uint64_t size)
1361{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001362 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001363 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001364 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001365 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001366}
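
/*
 * Illustrative usage sketch (not part of this file): a device exposing a
 * small MMIO register bank would typically define a MemoryRegionOps and
 * pass it to memory_region_init_io().  The my_dev_* names and the 4-byte
 * register layout below are hypothetical.
 *
 *     static uint64_t my_dev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void my_dev_write(void *opaque, hwaddr addr,
 *                              uint64_t data, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps my_dev_ops = {
 *         .read = my_dev_read,
 *         .write = my_dev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .impl = { .min_access_size = 4, .max_access_size = 4 },
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                           "my-dev-mmio", 0x1000);
 */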
1367
1368void memory_region_init_ram(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001369 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001370 const char *name,
Hu Tao49946532014-09-09 13:27:55 +08001371 uint64_t size,
1372 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001373{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001374 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001375 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001376 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001377 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001378 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001379 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001380}
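
/*
 * Illustrative sketch (assumed board code, hypothetical names): guest RAM
 * is normally created here and then mapped into the system address space:
 *
 *     memory_region_init_ram(ram, NULL, "board.ram", ram_size, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */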
1381
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001382void memory_region_init_resizeable_ram(MemoryRegion *mr,
1383 Object *owner,
1384 const char *name,
1385 uint64_t size,
1386 uint64_t max_size,
1387 void (*resized)(const char*,
1388 uint64_t length,
1389 void *host),
1390 Error **errp)
1391{
1392 memory_region_init(mr, owner, name, size);
1393 mr->ram = true;
1394 mr->terminates = true;
1395 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001396 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1397 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001398 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001399}
1400
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001401#ifdef __linux__
1402void memory_region_init_ram_from_file(MemoryRegion *mr,
1403 struct Object *owner,
1404 const char *name,
1405 uint64_t size,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001406 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001407 const char *path,
1408 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001409{
1410 memory_region_init(mr, owner, name, size);
1411 mr->ram = true;
1412 mr->terminates = true;
1413 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001414 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001415 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001416}
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001417
1418void memory_region_init_ram_from_fd(MemoryRegion *mr,
1419 struct Object *owner,
1420 const char *name,
1421 uint64_t size,
1422 bool share,
1423 int fd,
1424 Error **errp)
1425{
1426 memory_region_init(mr, owner, name, size);
1427 mr->ram = true;
1428 mr->terminates = true;
1429 mr->destructor = memory_region_destructor_ram;
1430 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1431 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1432}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001433#endif
1434
Avi Kivity093bc2c2011-07-26 14:26:01 +03001435void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001436 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001437 const char *name,
1438 uint64_t size,
1439 void *ptr)
1440{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001441 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001442 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001443 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001444 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001445 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001446
1447 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1448 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001449 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001450}
1451
Alex Williamson21e00fa2016-10-31 09:53:03 -06001452void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1453 Object *owner,
1454 const char *name,
1455 uint64_t size,
1456 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301457{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001458 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1459 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001460 mr->ops = &ram_device_mem_ops;
1461 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301462}
1463
Avi Kivity093bc2c2011-07-26 14:26:01 +03001464void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001465 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001466 const char *name,
1467 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001468 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001469 uint64_t size)
1470{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001471 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001472 mr->alias = orig;
1473 mr->alias_offset = offset;
1474}
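
/*
 * Illustrative sketch (hypothetical names): an alias makes part of an
 * existing region visible at another guest-physical address, e.g. a
 * low-memory mirror of the first megabyte of RAM:
 *
 *     memory_region_init_alias(ram_below_1m, NULL, "ram-below-1m",
 *                              ram, 0, 0x100000);
 *     memory_region_add_subregion_overlap(system_memory, 0,
 *                                         ram_below_1m, 1);
 */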
1475
Peter Maydella1777f72016-07-04 13:06:35 +01001476void memory_region_init_rom(MemoryRegion *mr,
1477 struct Object *owner,
1478 const char *name,
1479 uint64_t size,
1480 Error **errp)
1481{
1482 memory_region_init(mr, owner, name, size);
1483 mr->ram = true;
1484 mr->readonly = true;
1485 mr->terminates = true;
1486 mr->destructor = memory_region_destructor_ram;
1487 mr->ram_block = qemu_ram_alloc(size, mr, errp);
1488 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1489}
1490
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001491void memory_region_init_rom_device(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001492 Object *owner,
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001493 const MemoryRegionOps *ops,
Avi Kivity75f59412011-08-26 00:35:15 +03001494 void *opaque,
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001495 const char *name,
Hu Tao33e0eb52014-09-09 13:27:57 +08001496 uint64_t size,
1497 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001498{
Peter Maydell39e0b032016-07-04 13:06:35 +01001499 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001500 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001501 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001502 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001503 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001504 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001505 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001506 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001507}
1508
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001509void memory_region_init_iommu(IOMMUMemoryRegion *iommu_mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001510 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001511 const MemoryRegionIOMMUOps *ops,
1512 const char *name,
1513 uint64_t size)
1514{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001515 struct MemoryRegion *mr;
1516
1517 object_initialize(iommu_mr, sizeof(*iommu_mr), TYPE_IOMMU_MEMORY_REGION);
1518 mr = MEMORY_REGION(iommu_mr);
1519 memory_region_do_init(mr, owner, name, size);
1520 iommu_mr = IOMMU_MEMORY_REGION(mr);
1521 iommu_mr->iommu_ops = ops,
Avi Kivity30951152012-10-30 13:47:46 +02001522 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001523 QLIST_INIT(&iommu_mr->iommu_notify);
1524 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001525}
1526
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001527static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001528{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001529 MemoryRegion *mr = MEMORY_REGION(obj);
1530
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001531 assert(!mr->container);
1532
1533 /* We know the region is not visible in any address space (it
1534 * does not have a container and cannot be a root either because
1535     * it has no references), so we can blindly clear mr->enabled.
1536 * memory_region_set_enabled instead could trigger a transaction
1537 * and cause an infinite loop.
1538 */
1539 mr->enabled = false;
1540 memory_region_transaction_begin();
1541 while (!QTAILQ_EMPTY(&mr->subregions)) {
1542 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1543 memory_region_del_subregion(mr, subregion);
1544 }
1545 memory_region_transaction_commit();
1546
Avi Kivity545e92e2011-08-08 19:58:48 +03001547 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001548 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001549 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001550 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001551}
1552
Paolo Bonzini803c0812013-05-07 06:59:09 +02001553Object *memory_region_owner(MemoryRegion *mr)
1554{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001555 Object *obj = OBJECT(mr);
1556 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001557}
1558
Paolo Bonzini46637be2013-05-07 09:06:00 +02001559void memory_region_ref(MemoryRegion *mr)
1560{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001561 /* MMIO callbacks most likely will access data that belongs
1562 * to the owner, hence the need to ref/unref the owner whenever
1563 * the memory region is in use.
1564 *
1565 * The memory region is a child of its owner. As long as the
1566     * owner doesn't itself call unparent on the memory region,
1567 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001568 * Memory regions without an owner are supposed to never go away;
1569 * we do not ref/unref them because it slows down DMA sensibly.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001570 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001571 if (mr && mr->owner) {
1572 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001573 }
1574}
1575
1576void memory_region_unref(MemoryRegion *mr)
1577{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001578 if (mr && mr->owner) {
1579 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001580 }
1581}
1582
Avi Kivity093bc2c2011-07-26 14:26:01 +03001583uint64_t memory_region_size(MemoryRegion *mr)
1584{
Avi Kivity08dafab2011-10-16 13:19:17 +02001585 if (int128_eq(mr->size, int128_2_64())) {
1586 return UINT64_MAX;
1587 }
1588 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001589}
1590
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001591const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001592{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001593 if (!mr->name) {
1594 ((MemoryRegion *)mr)->name =
1595 object_get_canonical_path_component(OBJECT(mr));
1596 }
Peter Maydell302fa282014-08-19 20:05:46 +01001597 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001598}
1599
Alex Williamson21e00fa2016-10-31 09:53:03 -06001600bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301601{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001602 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301603}
1604
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001605uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001606{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001607 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001608 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001609 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1610 }
1611 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001612}
1613
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001614bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1615{
1616 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1617}
1618
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001619static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001620{
1621 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1622 IOMMUNotifier *iommu_notifier;
1623
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001624 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001625 flags |= iommu_notifier->notifier_flags;
1626 }
1627
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001628 if (flags != iommu_mr->iommu_notify_flags &&
1629 iommu_mr->iommu_ops->notify_flag_changed) {
1630 iommu_mr->iommu_ops->notify_flag_changed(iommu_mr,
1631 iommu_mr->iommu_notify_flags,
1632 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001633 }
1634
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001635 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001636}
1637
Peter Xucdb30812016-09-23 13:02:26 +08001638void memory_region_register_iommu_notifier(MemoryRegion *mr,
1639 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001640{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001641 IOMMUMemoryRegion *iommu_mr;
1642
Jason Wangefcd38c2016-12-30 18:09:17 +08001643 if (mr->alias) {
1644 memory_region_register_iommu_notifier(mr->alias, n);
1645 return;
1646 }
1647
Peter Xucdb30812016-09-23 13:02:26 +08001648 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001649 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001650 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001651 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001652 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1653 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001654}
1655
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001656uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001657{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001658 if (iommu_mr->iommu_ops && iommu_mr->iommu_ops->get_min_page_size) {
1659 return iommu_mr->iommu_ops->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001660 }
1661 return TARGET_PAGE_SIZE;
1662}
1663
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001664void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001665{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001666 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001667 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001668 IOMMUTLBEntry iotlb;
1669
Peter Xufaa362e2017-04-07 18:59:11 +08001670 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001671 if (iommu_mr->iommu_ops->replay) {
1672 iommu_mr->iommu_ops->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001673 return;
1674 }
1675
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001676 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001677
David Gibsona788f222015-09-30 12:13:55 +10001678 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001679 iotlb = iommu_mr->iommu_ops->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001680 if (iotlb.perm != IOMMU_NONE) {
1681 n->notify(n, &iotlb);
1682 }
1683
1684 /* if (2^64 - MR size) < granularity, it's possible to get an
1685 * infinite loop here. This should catch such a wraparound */
1686 if ((addr + granularity) < addr) {
1687 break;
1688 }
1689 }
1690}
1691
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001692void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001693{
1694 IOMMUNotifier *notifier;
1695
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001696 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1697 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001698 }
1699}
1700
Peter Xucdb30812016-09-23 13:02:26 +08001701void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1702 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001703{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001704 IOMMUMemoryRegion *iommu_mr;
1705
Jason Wangefcd38c2016-12-30 18:09:17 +08001706 if (mr->alias) {
1707 memory_region_unregister_iommu_notifier(mr->alias, n);
1708 return;
1709 }
Peter Xucdb30812016-09-23 13:02:26 +08001710 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001711 iommu_mr = IOMMU_MEMORY_REGION(mr);
1712 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001713}
1714
Peter Xubd2bfa42017-04-07 18:59:10 +08001715void memory_region_notify_one(IOMMUNotifier *notifier,
1716 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001717{
Peter Xucdb30812016-09-23 13:02:26 +08001718 IOMMUNotifierFlag request_flags;
1719
Peter Xubd2bfa42017-04-07 18:59:10 +08001720 /*
1721     * Skip the notification if it does not overlap with the
1722     * registered range.
1723 */
1724 if (notifier->start > entry->iova + entry->addr_mask + 1 ||
1725 notifier->end < entry->iova) {
1726 return;
1727 }
Peter Xucdb30812016-09-23 13:02:26 +08001728
Peter Xubd2bfa42017-04-07 18:59:10 +08001729 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001730 request_flags = IOMMU_NOTIFIER_MAP;
1731 } else {
1732 request_flags = IOMMU_NOTIFIER_UNMAP;
1733 }
1734
Peter Xubd2bfa42017-04-07 18:59:10 +08001735 if (notifier->notifier_flags & request_flags) {
1736 notifier->notify(notifier, entry);
1737 }
1738}
1739
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001740void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001741 IOMMUTLBEntry entry)
1742{
1743 IOMMUNotifier *iommu_notifier;
1744
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001745 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001746
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001747 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001748 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001749 }
David Gibson06866572013-05-14 19:13:56 +10001750}
1751
Avi Kivity093bc2c2011-07-26 14:26:01 +03001752void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1753{
Avi Kivity5a583342011-07-26 14:26:02 +03001754 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001755 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001756
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001757 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001758 old_logging = mr->vga_logging_count;
1759 mr->vga_logging_count += log ? 1 : -1;
1760 if (!!old_logging == !!mr->vga_logging_count) {
1761 return;
1762 }
1763
Jan Kiszka59023ef2012-08-23 13:02:30 +02001764 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001765 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001766 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001767 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001768}
1769
Avi Kivitya8170e52012-10-23 12:30:10 +02001770bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1771 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001772{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001773 assert(mr->ram_block);
1774 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1775 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001776}
1777
Avi Kivitya8170e52012-10-23 12:30:10 +02001778void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1779 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001780{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001781 assert(mr->ram_block);
1782 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1783 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001784 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001785}
1786
Juan Quintela6c279db2012-10-17 20:24:28 +02001787bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1788 hwaddr size, unsigned client)
1789{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001790 assert(mr->ram_block);
1791 return cpu_physical_memory_test_and_clear_dirty(
1792 memory_region_get_ram_addr(mr) + addr, size, client);
Juan Quintela6c279db2012-10-17 20:24:28 +02001793}
1794
Gerd Hoffmann8deaf122017-04-21 11:16:25 +02001795DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1796 hwaddr addr,
1797 hwaddr size,
1798 unsigned client)
1799{
1800 assert(mr->ram_block);
1801 return cpu_physical_memory_snapshot_and_clear_dirty(
1802 memory_region_get_ram_addr(mr) + addr, size, client);
1803}
1804
1805bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1806 hwaddr addr, hwaddr size)
1807{
1808 assert(mr->ram_block);
1809 return cpu_physical_memory_snapshot_get_dirty(snap,
1810 memory_region_get_ram_addr(mr) + addr, size);
1811}
Juan Quintela6c279db2012-10-17 20:24:28 +02001812
Avi Kivity093bc2c2011-07-26 14:26:01 +03001813void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1814{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001815 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001816 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001817 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001818 FlatRange *fr;
1819
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001820 /* If the same address space has multiple log_sync listeners, we
1821 * visit that address space's FlatView multiple times. But because
1822 * log_sync listeners are rare, it's still cheaper than walking each
1823 * address space once.
1824 */
1825 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1826 if (!listener->log_sync) {
1827 continue;
1828 }
1829 as = listener->address_space;
1830 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001831 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity0d673e32012-10-02 15:28:50 +02001832 if (fr->mr == mr) {
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001833 MemoryRegionSection mrs = section_from_flat_range(fr, as);
1834 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02001835 }
Avi Kivity5a583342011-07-26 14:26:02 +03001836 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001837 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03001838 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001839}
1840
1841void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1842{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001843 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001844 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001845 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01001846 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001847 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001848 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001849}
1850
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001851void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001852{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001853 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001854 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001855 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01001856 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001857 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001858 }
1859}
1860
Avi Kivitya8170e52012-10-23 12:30:10 +02001861void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1862 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001863{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001864 assert(mr->ram_block);
1865 cpu_physical_memory_test_and_clear_dirty(
1866 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001867}
1868
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001869int memory_region_get_fd(MemoryRegion *mr)
1870{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001871 int fd;
1872
1873 rcu_read_lock();
1874 while (mr->alias) {
1875 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001876 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001877 fd = mr->ram_block->fd;
1878 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001879
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001880 return fd;
1881}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001882
Avi Kivity093bc2c2011-07-26 14:26:01 +03001883void *memory_region_get_ram_ptr(MemoryRegion *mr)
1884{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001885 void *ptr;
1886 uint64_t offset = 0;
1887
1888 rcu_read_lock();
1889 while (mr->alias) {
1890 offset += mr->alias_offset;
1891 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001892 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08001893 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001894 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001895 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001896
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001897 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001898}
1899
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001900MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
1901{
1902 RAMBlock *block;
1903
1904 block = qemu_ram_block_from_host(ptr, false, offset);
1905 if (!block) {
1906 return NULL;
1907 }
1908
1909 return block->mr;
1910}
1911
Fam Zheng7ebb2742016-03-01 14:18:20 +08001912ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
1913{
1914 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
1915}
1916
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001917void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
1918{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001919 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001920
Gongleifa53a0e2016-05-10 10:04:59 +08001921 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001922}
1923
Avi Kivity0d673e32012-10-02 15:28:50 +02001924static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001925{
Paolo Bonzini99e86342013-05-06 10:26:13 +02001926 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001927 FlatRange *fr;
1928 CoalescedMemoryRange *cmr;
1929 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02001930 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001931
Paolo Bonzini856d7242013-05-06 11:57:21 +02001932 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001933 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001934 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02001935 section = (MemoryRegionSection) {
Avi Kivityf6790af2012-10-02 20:13:51 +02001936 .address_space = as,
Avi Kivity95d29942012-10-02 18:21:54 +02001937 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001938 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02001939 };
1940
Paolo Bonzini9a546352016-09-22 16:23:06 +02001941 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02001942 int128_get64(fr->addr.start),
1943 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001944 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
1945 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02001946 int128_sub(fr->addr.start,
1947 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001948 if (!addrrange_intersects(tmp, fr->addr)) {
1949 continue;
1950 }
1951 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02001952 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02001953 int128_get64(tmp.start),
1954 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001955 }
1956 }
1957 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001958 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001959}
1960
Avi Kivity0d673e32012-10-02 15:28:50 +02001961static void memory_region_update_coalesced_range(MemoryRegion *mr)
1962{
1963 AddressSpace *as;
1964
1965 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1966 memory_region_update_coalesced_range_as(mr, as);
1967 }
1968}
1969
Avi Kivity093bc2c2011-07-26 14:26:01 +03001970void memory_region_set_coalescing(MemoryRegion *mr)
1971{
1972 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02001973 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001974}
1975
1976void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001977 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001978 uint64_t size)
1979{
Anthony Liguori7267c092011-08-20 22:09:37 -05001980 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001981
Avi Kivity08dafab2011-10-16 13:19:17 +02001982 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001983 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
1984 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02001985 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001986}
1987
1988void memory_region_clear_coalescing(MemoryRegion *mr)
1989{
1990 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08001991 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001992
Jan Kiszkad4105152012-08-23 13:02:29 +02001993 qemu_flush_coalesced_mmio_buffer();
1994 mr->flush_coalesced_mmio = false;
1995
Avi Kivity093bc2c2011-07-26 14:26:01 +03001996 while (!QTAILQ_EMPTY(&mr->coalesced)) {
1997 cmr = QTAILQ_FIRST(&mr->coalesced);
1998 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05001999 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002000 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002001 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002002
2003 if (updated) {
2004 memory_region_update_coalesced_range(mr);
2005 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002006}
2007
Jan Kiszkad4105152012-08-23 13:02:29 +02002008void memory_region_set_flush_coalesced(MemoryRegion *mr)
2009{
2010 mr->flush_coalesced_mmio = true;
2011}
2012
2013void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2014{
2015 qemu_flush_coalesced_mmio_buffer();
2016 if (QTAILQ_EMPTY(&mr->coalesced)) {
2017 mr->flush_coalesced_mmio = false;
2018 }
2019}
2020
Jan Kiszka196ea132015-06-18 18:47:20 +02002021void memory_region_set_global_locking(MemoryRegion *mr)
2022{
2023 mr->global_locking = true;
2024}
2025
2026void memory_region_clear_global_locking(MemoryRegion *mr)
2027{
2028 mr->global_locking = false;
2029}
2030
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002031static bool userspace_eventfd_warning;
2032
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002033void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002034 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002035 unsigned size,
2036 bool match_data,
2037 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002038 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002039{
2040 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002041 .addr.start = int128_make64(addr),
2042 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002043 .match_data = match_data,
2044 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002045 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002046 };
2047 unsigned i;
2048
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002049 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2050 userspace_eventfd_warning))) {
2051 userspace_eventfd_warning = true;
2052 error_report("Using eventfd without MMIO binding in KVM. "
2053 "Suboptimal performance expected");
2054 }
2055
Jason Wangb8aecea2015-11-06 16:02:45 +08002056 if (size) {
2057 adjust_endianness(mr, &mrfd.data, size);
2058 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002059 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002060 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2061 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2062 break;
2063 }
2064 }
2065 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002066 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002067 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2068 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2069 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2070 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002071 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002072 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002073}
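
/*
 * Illustrative sketch (assumed device code): to have writes of the value 1
 * to a 2-byte doorbell at offset 0x10 signal an EventNotifier instead of
 * trapping into the write callback, a device could do:
 *
 *     event_notifier_init(&s->kick, 0);
 *     memory_region_add_eventfd(&s->iomem, 0x10, 2, true, 1, &s->kick);
 *
 * and later remove it with memory_region_del_eventfd() using exactly the
 * same arguments.
 */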
2074
2075void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002076 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002077 unsigned size,
2078 bool match_data,
2079 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002080 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002081{
2082 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002083 .addr.start = int128_make64(addr),
2084 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002085 .match_data = match_data,
2086 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002087 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002088 };
2089 unsigned i;
2090
Jason Wangb8aecea2015-11-06 16:02:45 +08002091 if (size) {
2092 adjust_endianness(mr, &mrfd.data, size);
2093 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002094 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002095 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2096 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2097 break;
2098 }
2099 }
2100 assert(i != mr->ioeventfd_nb);
2101 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2102 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2103 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002104 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002105 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002106 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002107 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002108}
2109
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002110static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002111{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002112 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002113 MemoryRegion *other;
2114
Jan Kiszka59023ef2012-08-23 13:02:30 +02002115 memory_region_transaction_begin();
2116
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002117 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002118 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002119 if (subregion->priority >= other->priority) {
2120 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2121 goto done;
2122 }
2123 }
2124 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2125done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002126 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002127 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002128}
2129
Peter Crosthwaite05987012014-06-05 23:14:44 -07002130static void memory_region_add_subregion_common(MemoryRegion *mr,
2131 hwaddr offset,
2132 MemoryRegion *subregion)
2133{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002134 assert(!subregion->container);
2135 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002136 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002137 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002138}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002139
2140void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002141 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002142 MemoryRegion *subregion)
2143{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002144 subregion->priority = 0;
2145 memory_region_add_subregion_common(mr, offset, subregion);
2146}
2147
2148void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002149 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002150 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002151 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002152{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002153 subregion->priority = priority;
2154 memory_region_add_subregion_common(mr, offset, subregion);
2155}
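
/*
 * Illustrative sketch (hypothetical regions): overlapping subregions are
 * resolved by priority, with higher values shadowing lower ones.  A ROM
 * that must win over RAM mapped at the same window could be added as:
 *
 *     memory_region_add_subregion_overlap(system_memory, 0xfffc0000, ram, 0);
 *     memory_region_add_subregion_overlap(system_memory, 0xfffc0000, rom, 1);
 */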
2156
2157void memory_region_del_subregion(MemoryRegion *mr,
2158 MemoryRegion *subregion)
2159{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002160 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002161 assert(subregion->container == mr);
2162 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002163 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002164 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002165 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002166 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002167}
2168
2169void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2170{
2171 if (enabled == mr->enabled) {
2172 return;
2173 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002174 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002175 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002176 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002177 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002178}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002179
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002180void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2181{
2182 Int128 s = int128_make64(size);
2183
2184 if (size == UINT64_MAX) {
2185 s = int128_2_64();
2186 }
2187 if (int128_eq(s, mr->size)) {
2188 return;
2189 }
2190 memory_region_transaction_begin();
2191 mr->size = s;
2192 memory_region_update_pending = true;
2193 memory_region_transaction_commit();
2194}
2195
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002196static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002197{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002198 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002199
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002200 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002201 memory_region_transaction_begin();
2202 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002203 memory_region_del_subregion(container, mr);
2204 mr->container = container;
2205 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002206 memory_region_unref(mr);
2207 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002208 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002209}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002210
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002211void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2212{
2213 if (addr != mr->addr) {
2214 mr->addr = addr;
2215 memory_region_readd_subregion(mr);
2216 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002217}
2218
Avi Kivitya8170e52012-10-23 12:30:10 +02002219void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002220{
Avi Kivity47033592011-12-04 19:16:50 +02002221 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002222
Jan Kiszka59023ef2012-08-23 13:02:30 +02002223 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002224 return;
2225 }
2226
Jan Kiszka59023ef2012-08-23 13:02:30 +02002227 memory_region_transaction_begin();
2228 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002229 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002230 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002231}
2232
Igor Mammedova2b257d2014-10-31 16:38:37 +00002233uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2234{
2235 return mr->align;
2236}
2237
Avi Kivitye2177952011-12-08 15:00:18 +02002238static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2239{
2240 const AddrRange *addr = addr_;
2241 const FlatRange *fr = fr_;
2242
2243 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2244 return -1;
2245 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2246 return 1;
2247 }
2248 return 0;
2249}
2250
Paolo Bonzini99e86342013-05-06 10:26:13 +02002251static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002252{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002253 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002254 sizeof(FlatRange), cmp_flatrange_addr);
2255}
2256
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002257bool memory_region_is_mapped(MemoryRegion *mr)
2258{
2259 return mr->container ? true : false;
2260}
2261
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002262/* Same as memory_region_find, but it does not add a reference to the
2263 * returned region. It must be called from an RCU critical section.
2264 */
2265static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2266 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002267{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002268 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002269 MemoryRegion *root;
2270 AddressSpace *as;
2271 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002272 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002273 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002274
Paolo Bonzini73034e92013-05-07 15:48:28 +02002275 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002276 for (root = mr; root->container; ) {
2277 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002278 addr += root->addr;
2279 }
2280
2281 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002282 if (!as) {
2283 return ret;
2284 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002285 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002286
Paolo Bonzini2b647662013-05-17 12:40:44 +02002287 view = atomic_rcu_read(&as->current_map);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002288 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002289 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002290 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002291 }
2292
Paolo Bonzini99e86342013-05-06 10:26:13 +02002293 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002294 --fr;
2295 }
2296
2297 ret.mr = fr->mr;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002298 ret.address_space = as;
Avi Kivitye2177952011-12-08 15:00:18 +02002299 range = addrrange_intersection(range, fr->addr);
2300 ret.offset_within_region = fr->offset_in_region;
2301 ret.offset_within_region += int128_get64(int128_sub(range.start,
2302 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002303 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002304 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002305 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002306 return ret;
2307}
2308
2309MemoryRegionSection memory_region_find(MemoryRegion *mr,
2310 hwaddr addr, uint64_t size)
2311{
2312 MemoryRegionSection ret;
2313 rcu_read_lock();
2314 ret = memory_region_find_rcu(mr, addr, size);
2315 if (ret.mr) {
2316 memory_region_ref(ret.mr);
2317 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002318 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002319 return ret;
2320}
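
/*
 * Illustrative sketch: memory_region_find() returns a referenced region,
 * so callers must drop the reference when they are done with the section:
 *
 *     MemoryRegionSection sec = memory_region_find(mr, addr, 4);
 *     if (sec.mr) {
 *         ...use sec.offset_within_region and sec.size here...
 *         memory_region_unref(sec.mr);
 *     }
 */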
2321
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002322bool memory_region_present(MemoryRegion *container, hwaddr addr)
2323{
2324 MemoryRegion *mr;
2325
2326 rcu_read_lock();
2327 mr = memory_region_find_rcu(container, addr, 1).mr;
2328 rcu_read_unlock();
2329 return mr && mr != container;
2330}
2331
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002332void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002333{
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002334 MemoryListener *listener;
2335 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002336 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002337 FlatRange *fr;
2338
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002339 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2340 if (!listener->log_sync) {
2341 continue;
2342 }
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002343 as = listener->address_space;
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002344 view = address_space_get_flatview(as);
2345 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonziniadaad612016-09-22 16:09:08 +02002346 if (fr->dirty_log_mask) {
2347 MemoryRegionSection mrs = section_from_flat_range(fr, as);
2348 listener->log_sync(listener, &mrs);
2349 }
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002350 }
2351 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002352 }
2353}
2354
2355void memory_global_dirty_log_start(void)
2356{
Avi Kivity7664e802011-12-11 14:47:25 +02002357 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002358
Avi Kivity7376e582012-02-08 21:05:17 +02002359 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002360
2361 /* Refresh DIRTY_LOG_MIGRATION bit. */
2362 memory_region_transaction_begin();
2363 memory_region_update_pending = true;
2364 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002365}
2366
2367void memory_global_dirty_log_stop(void)
2368{
Avi Kivity7664e802011-12-11 14:47:25 +02002369 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002370
2371 /* Refresh DIRTY_LOG_MIGRATION bit. */
2372 memory_region_transaction_begin();
2373 memory_region_update_pending = true;
2374 memory_region_transaction_commit();
2375
Avi Kivity7376e582012-02-08 21:05:17 +02002376 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002377}
2378
2379static void listener_add_address_space(MemoryListener *listener,
2380 AddressSpace *as)
2381{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002382 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002383 FlatRange *fr;
2384
Paolo Bonzini680a4782015-11-02 09:23:52 +01002385 if (listener->begin) {
2386 listener->begin(listener);
2387 }
Avi Kivity7664e802011-12-11 14:47:25 +02002388 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002389 if (listener->log_global_start) {
2390 listener->log_global_start(listener);
2391 }
Avi Kivity7664e802011-12-11 14:47:25 +02002392 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002393
Paolo Bonzini856d7242013-05-06 11:57:21 +02002394 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002395 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity7664e802011-12-11 14:47:25 +02002396 MemoryRegionSection section = {
2397 .mr = fr->mr,
Avi Kivityf6790af2012-10-02 20:13:51 +02002398 .address_space = as,
Avi Kivity7664e802011-12-11 14:47:25 +02002399 .offset_within_region = fr->offset_in_region,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002400 .size = fr->addr.size,
Avi Kivity7664e802011-12-11 14:47:25 +02002401 .offset_within_address_space = int128_get64(fr->addr.start),
Avi Kivity7a8499e2012-02-08 17:01:23 +02002402 .readonly = fr->readonly,
Avi Kivity7664e802011-12-11 14:47:25 +02002403 };
Paolo Bonzini680a4782015-11-02 09:23:52 +01002404 if (fr->dirty_log_mask && listener->log_start) {
2405 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2406 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002407 if (listener->region_add) {
2408 listener->region_add(listener, &section);
2409 }
Avi Kivity7664e802011-12-11 14:47:25 +02002410 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002411 if (listener->commit) {
2412 listener->commit(listener);
2413 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002414 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002415}
2416
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002417void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002418{
Avi Kivity72e22d22012-02-08 15:05:50 +02002419 MemoryListener *other = NULL;
2420
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002421 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002422 if (QTAILQ_EMPTY(&memory_listeners)
2423 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2424 memory_listeners)->priority) {
2425 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2426 } else {
2427 QTAILQ_FOREACH(other, &memory_listeners, link) {
2428 if (listener->priority < other->priority) {
2429 break;
2430 }
2431 }
2432 QTAILQ_INSERT_BEFORE(other, listener, link);
2433 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002434
Paolo Bonzini9a546352016-09-22 16:23:06 +02002435 if (QTAILQ_EMPTY(&as->listeners)
2436 || listener->priority >= QTAILQ_LAST(&as->listeners,
2437 memory_listeners)->priority) {
2438 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2439 } else {
2440 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2441 if (listener->priority < other->priority) {
2442 break;
2443 }
2444 }
2445 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2446 }
2447
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002448 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002449}
2450
2451void memory_listener_unregister(MemoryListener *listener)
2452{
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002453 if (!listener->address_space) {
2454 return;
2455 }
2456
Avi Kivity72e22d22012-02-08 15:05:50 +02002457 QTAILQ_REMOVE(&memory_listeners, listener, link);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002458 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002459 listener->address_space = NULL;
Avi Kivity86e775c2011-12-15 16:24:49 +02002460}
Avi Kivitye2177952011-12-08 15:00:18 +02002461
KONRAD Fredericc9356742016-10-19 15:06:49 +02002462bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2463{
2464 void *host;
2465 unsigned size = 0;
2466 unsigned offset = 0;
2467 Object *new_interface;
2468
2469 if (!mr || !mr->ops->request_ptr) {
2470 return false;
2471 }
2472
2473 /*
2474     * Avoid an update if the request_ptr callback calls
2475     * memory_region_invalidate_mmio_ptr, which is likely when a cache is
2476     * in use.
2477 */
2478 memory_region_transaction_begin();
2479
2480 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2481
2482 if (!host || !size) {
2483 memory_region_transaction_commit();
2484 return false;
2485 }
2486
2487 new_interface = object_new("mmio_interface");
2488 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
2489 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
2490 qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
2491 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
2492 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
2493 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
2494
2495 memory_region_transaction_commit();
2496 return true;
2497}
2498
2499typedef struct MMIOPtrInvalidate {
2500 MemoryRegion *mr;
2501 hwaddr offset;
2502 unsigned size;
2503 int busy;
2504 int allocated;
2505} MMIOPtrInvalidate;
2506
2507#define MAX_MMIO_INVALIDATE 10
2508static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
2509
2510static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
2511 run_on_cpu_data data)
2512{
2513 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
2514 MemoryRegion *mr = invalidate_data->mr;
2515 hwaddr offset = invalidate_data->offset;
2516 unsigned size = invalidate_data->size;
2517 MemoryRegionSection section = memory_region_find(mr, offset, size);
2518
2519 qemu_mutex_lock_iothread();
2520
2521 /* Reset dirty so this doesn't happen later. */
2522 cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
2523
2524 if (section.mr != mr) {
2525         /* memory_region_find adds a ref on section.mr */
2526 memory_region_unref(section.mr);
2527 if (MMIO_INTERFACE(section.mr->owner)) {
2528             /* We found the interface; just drop it. */
2529 object_property_set_bool(section.mr->owner, false, "realized",
2530 NULL);
2531 object_unref(section.mr->owner);
2532 object_unparent(section.mr->owner);
2533 }
2534 }
2535
2536 qemu_mutex_unlock_iothread();
2537
2538 if (invalidate_data->allocated) {
2539 g_free(invalidate_data);
2540 } else {
2541 invalidate_data->busy = 0;
2542 }
2543}
2544
2545void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
2546 unsigned size)
2547{
2548 size_t i;
2549 MMIOPtrInvalidate *invalidate_data = NULL;
2550
2551 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
2552 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
2553 invalidate_data = &mmio_ptr_invalidate_list[i];
2554 break;
2555 }
2556 }
2557
2558 if (!invalidate_data) {
2559 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
2560 invalidate_data->allocated = 1;
2561 }
2562
2563 invalidate_data->mr = mr;
2564 invalidate_data->offset = offset;
2565 invalidate_data->size = size;
2566
2567 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
2568 RUN_ON_CPU_HOST_PTR(invalidate_data));
2569}
2570
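/*
 * Initialize @as with @root as its root region.  The root is referenced
 * for the lifetime of the address space, an empty FlatView is installed,
 * and the transaction commit below renders the real view.
 *
 * Typical usage (a minimal sketch; the region and address space names
 * below are made up for illustration):
 *
 *     MemoryRegion *root = g_new(MemoryRegion, 1);
 *     memory_region_init(root, NULL, "example-root", UINT64_MAX);
 *
 *     AddressSpace as;
 *     address_space_init(&as, root, "example-as");
 *     ...
 *     address_space_destroy(&as);
 */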
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002571void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002572{
Paolo Bonziniac951902015-02-11 15:21:04 +01002573 memory_region_ref(root);
Jan Kiszka59023ef2012-08-23 13:02:30 +02002574 memory_region_transaction_begin();
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002575 as->ref_count = 1;
Avi Kivity8786db72012-10-02 13:53:41 +02002576 as->root = root;
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002577 as->malloced = false;
Avi Kivity8786db72012-10-02 13:53:41 +02002578 as->current_map = g_new(FlatView, 1);
2579 flatview_init(as->current_map);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002580 as->ioeventfd_nb = 0;
2581 as->ioeventfds = NULL;
Paolo Bonzini9a546352016-09-22 16:23:06 +02002582 QTAILQ_INIT(&as->listeners);
Avi Kivity0d673e32012-10-02 15:28:50 +02002583 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002584 as->name = g_strdup(name ? name : "anonymous");
Avi Kivityac1970f2012-10-03 16:22:53 +02002585 address_space_init_dispatch(as);
Paolo Bonzinif43793c2013-04-16 15:39:51 +02002586 memory_region_update_pending |= root->enabled;
2587 memory_region_transaction_commit();
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002588}
Avi Kivity658b2222011-07-26 14:26:08 +03002589
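/*
 * Runs as the RCU callback scheduled by address_space_destroy(), once
 * no reader can still see the old dispatch tables or FlatView.
 */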
Paolo Bonzini374f2982013-05-17 12:37:03 +02002590static void do_address_space_destroy(AddressSpace *as)
Avi Kivity83f3c252012-10-07 12:59:55 +02002591{
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002592 bool do_free = as->malloced;
David Gibson078c44f2014-05-30 12:59:00 -06002593
Avi Kivity83f3c252012-10-07 12:59:55 +02002594 address_space_destroy_dispatch(as);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002595 assert(QTAILQ_EMPTY(&as->listeners));
David Gibson078c44f2014-05-30 12:59:00 -06002596
Paolo Bonzini856d7242013-05-06 11:57:21 +02002597 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002598 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002599 g_free(as->ioeventfds);
Paolo Bonziniac951902015-02-11 15:21:04 +01002600 memory_region_unref(as->root);
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002601 if (do_free) {
2602 g_free(as);
2603 }
2604}
2605
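/*
 * Return an address space for @root that may be shared: an existing
 * heap-allocated address space with the same root is reused (its
 * reference count is bumped), otherwise a new one is allocated and
 * initialized.  Release it with address_space_destroy().
 */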
2606AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
2607{
2608 AddressSpace *as;
2609
2610 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2611 if (root == as->root && as->malloced) {
2612 as->ref_count++;
2613 return as;
2614 }
2615 }
2616
2617 as = g_malloc0(sizeof *as);
2618 address_space_init(as, root, name);
2619 as->malloced = true;
2620 return as;
Avi Kivity83f3c252012-10-07 12:59:55 +02002621}
2622
Paolo Bonzini374f2982013-05-17 12:37:03 +02002623void address_space_destroy(AddressSpace *as)
2624{
Paolo Bonziniac951902015-02-11 15:21:04 +01002625 MemoryRegion *root = as->root;
2626
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002627 as->ref_count--;
2628 if (as->ref_count) {
2629 return;
2630 }
Paolo Bonzini374f2982013-05-17 12:37:03 +02002631 /* Flush out anything from MemoryListeners listening in on this */
2632 memory_region_transaction_begin();
2633 as->root = NULL;
2634 memory_region_transaction_commit();
2635 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002636 address_space_unregister(as);
Paolo Bonzini374f2982013-05-17 12:37:03 +02002637
2638 /* At this point, as->dispatch and as->current_map are dummy
2639 * entries that the guest should never use. Wait for the old
2640 * values to expire before freeing the data.
2641 */
Paolo Bonziniac951902015-02-11 15:21:04 +01002642 as->root = root;
Paolo Bonzini374f2982013-05-17 12:37:03 +02002643 call_rcu(as, do_address_space_destroy, rcu);
2644}
2645
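/* Short type tag ("ram", "rom", "ramd", "romd" or "i/o") used by the mtree dumps below. */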
Peter Xu4e831902017-01-16 16:40:04 +08002646static const char *memory_region_type(MemoryRegion *mr)
2647{
2648 if (memory_region_is_ram_device(mr)) {
2649 return "ramd";
2650 } else if (memory_region_is_romd(mr)) {
2651 return "romd";
2652 } else if (memory_region_is_rom(mr)) {
2653 return "rom";
2654 } else if (memory_region_is_ram(mr)) {
2655 return "ram";
2656 } else {
2657 return "i/o";
2658 }
2659}
2660
Blue Swirl314e2982011-09-11 20:22:05 +00002661typedef struct MemoryRegionList MemoryRegionList;
2662
2663struct MemoryRegionList {
2664 const MemoryRegion *mr;
Blue Swirl314e2982011-09-11 20:22:05 +00002665 QTAILQ_ENTRY(MemoryRegionList) queue;
2666};
2667
2668typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
2669
Peter Xu4e831902017-01-16 16:40:04 +08002670#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2671 int128_sub((size), int128_one())) : 0)
2672#define MTREE_INDENT " "
2673
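/*
 * Print @mr at the given indentation @level, then recurse into its
 * subregions in address/priority order.  Alias targets are not expanded
 * in place; they are queued on @alias_print_queue and dumped separately
 * by mtree_info().
 */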
Blue Swirl314e2982011-09-11 20:22:05 +00002674static void mtree_print_mr(fprintf_function mon_printf, void *f,
2675 const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02002676 hwaddr base,
Jan Kiszka9479c572011-09-27 15:00:41 +02002677 MemoryRegionListHead *alias_print_queue)
Blue Swirl314e2982011-09-11 20:22:05 +00002678{
Jan Kiszka9479c572011-09-27 15:00:41 +02002679 MemoryRegionList *new_ml, *ml, *next_ml;
2680 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00002681 const MemoryRegion *submr;
2682 unsigned int i;
Peter Xub31f8412017-03-14 20:56:27 +08002683 hwaddr cur_start, cur_end;
Blue Swirl314e2982011-09-11 20:22:05 +00002684
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002685 if (!mr) {
Blue Swirl314e2982011-09-11 20:22:05 +00002686 return;
2687 }
2688
2689 for (i = 0; i < level; i++) {
Peter Xu4e831902017-01-16 16:40:04 +08002690 mon_printf(f, MTREE_INDENT);
Blue Swirl314e2982011-09-11 20:22:05 +00002691 }
2692
Peter Xub31f8412017-03-14 20:56:27 +08002693 cur_start = base + mr->addr;
2694 cur_end = cur_start + MR_SIZE(mr->size);
2695
2696 /*
2697     * Try to detect overflow of the memory region.  This should never
2698     * happen under normal circumstances.  If it does, print a marker to
2699     * warn the user who is observing the output.
2700 */
2701 if (cur_start < base || cur_end < cur_start) {
2702 mon_printf(f, "[DETECTED OVERFLOW!] ");
2703 }
2704
Blue Swirl314e2982011-09-11 20:22:05 +00002705 if (mr->alias) {
2706 MemoryRegionList *ml;
2707 bool found = false;
2708
2709 /* check if the alias is already in the queue */
Jan Kiszka9479c572011-09-27 15:00:41 +02002710 QTAILQ_FOREACH(ml, alias_print_queue, queue) {
Paolo Bonzinif54bb152013-12-11 12:51:46 +01002711 if (ml->mr == mr->alias) {
Blue Swirl314e2982011-09-11 20:22:05 +00002712 found = true;
2713 }
2714 }
2715
2716 if (!found) {
2717 ml = g_new(MemoryRegionList, 1);
2718 ml->mr = mr->alias;
Jan Kiszka9479c572011-09-27 15:00:41 +02002719 QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
Blue Swirl314e2982011-09-11 20:22:05 +00002720 }
Jan Kiszka4896d742012-02-04 16:25:42 +01002721 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
Peter Xu4e831902017-01-16 16:40:04 +08002722 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002723 "-" TARGET_FMT_plx "%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002724 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002725 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002726 memory_region_type((MemoryRegion *)mr),
Peter Crosthwaite3fb18b42014-08-14 23:55:36 -07002727 memory_region_name(mr),
2728 memory_region_name(mr->alias),
Blue Swirl314e2982011-09-11 20:22:05 +00002729 mr->alias_offset,
Peter Xu4e831902017-01-16 16:40:04 +08002730 mr->alias_offset + MR_SIZE(mr->size),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002731 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002732 } else {
Jan Kiszka4896d742012-02-04 16:25:42 +01002733 mon_printf(f,
Peter Xu4e831902017-01-16 16:40:04 +08002734 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002735 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002736 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002737 memory_region_type((MemoryRegion *)mr),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002738 memory_region_name(mr),
2739 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002740 }
Jan Kiszka9479c572011-09-27 15:00:41 +02002741
2742 QTAILQ_INIT(&submr_print_queue);
2743
Blue Swirl314e2982011-09-11 20:22:05 +00002744 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002745 new_ml = g_new(MemoryRegionList, 1);
2746 new_ml->mr = submr;
2747 QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
2748 if (new_ml->mr->addr < ml->mr->addr ||
2749 (new_ml->mr->addr == ml->mr->addr &&
2750 new_ml->mr->priority > ml->mr->priority)) {
2751 QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
2752 new_ml = NULL;
2753 break;
2754 }
2755 }
2756 if (new_ml) {
2757 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
2758 }
2759 }
2760
2761 QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
Peter Xub31f8412017-03-14 20:56:27 +08002762 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
Jan Kiszka9479c572011-09-27 15:00:41 +02002763 alias_print_queue);
2764 }
2765
Avi Kivity88365e42011-11-13 12:00:55 +02002766 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002767 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002768 }
2769}
2770
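/*
 * Dump the rendered FlatView of @as: one line per FlatRange with its
 * guest address range, priority, type and backing region (plus the
 * offset into that region when it is non-zero).
 */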
Peter Xu57bb40c2017-01-16 16:40:05 +08002771static void mtree_print_flatview(fprintf_function p, void *f,
2772 AddressSpace *as)
2773{
2774 FlatView *view = address_space_get_flatview(as);
2775 FlatRange *range = &view->ranges[0];
2776 MemoryRegion *mr;
2777 int n = view->nr;
2778
2779 if (n <= 0) {
2780 p(f, MTREE_INDENT "No rendered FlatView for "
2781 "address space '%s'\n", as->name);
2782 flatview_unref(view);
2783 return;
2784 }
2785
2786 while (n--) {
2787 mr = range->mr;
Paolo Bonzini377a07a2017-03-02 22:49:41 +01002788 if (range->offset_in_region) {
2789 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2790 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
2791 int128_get64(range->addr.start),
2792 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2793 mr->priority,
2794 range->readonly ? "rom" : memory_region_type(mr),
2795 memory_region_name(mr),
2796 range->offset_in_region);
2797 } else {
2798 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2799 TARGET_FMT_plx " (prio %d, %s): %s\n",
2800 int128_get64(range->addr.start),
2801 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2802 mr->priority,
2803 range->readonly ? "rom" : memory_region_type(mr),
2804 memory_region_name(mr));
2805 }
Peter Xu57bb40c2017-01-16 16:40:05 +08002806 range++;
2807 }
2808
2809 flatview_unref(view);
2810}
2811
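/*
 * Back end of the monitor's "info mtree" command.  With @flatview set,
 * dump the rendered FlatView of every address space; otherwise walk the
 * memory region trees and then the aliased regions collected in ml_head.
 */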
2812void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
Blue Swirl314e2982011-09-11 20:22:05 +00002813{
2814 MemoryRegionListHead ml_head;
2815 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02002816 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00002817
Peter Xu57bb40c2017-01-16 16:40:05 +08002818 if (flatview) {
2819 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2820 mon_printf(f, "address-space (flat view): %s\n", as->name);
2821 mtree_print_flatview(mon_printf, f, as);
2822 mon_printf(f, "\n");
2823 }
2824 return;
2825 }
2826
Blue Swirl314e2982011-09-11 20:22:05 +00002827 QTAILQ_INIT(&ml_head);
2828
Avi Kivity0d673e32012-10-02 15:28:50 +02002829 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02002830 mon_printf(f, "address-space: %s\n", as->name);
2831 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
2832 mon_printf(f, "\n");
Blue Swirlb9f9be82012-03-10 16:58:35 +00002833 }
2834
Blue Swirl314e2982011-09-11 20:22:05 +00002835 /* print aliased regions */
2836 QTAILQ_FOREACH(ml, &ml_head, queue) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02002837 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
2838 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
2839 mon_printf(f, "\n");
Blue Swirl314e2982011-09-11 20:22:05 +00002840 }
2841
2842 QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02002843 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002844 }
Blue Swirl314e2982011-09-11 20:22:05 +00002845}
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002846
2847static const TypeInfo memory_region_info = {
2848 .parent = TYPE_OBJECT,
2849 .name = TYPE_MEMORY_REGION,
2850 .instance_size = sizeof(MemoryRegion),
2851 .instance_init = memory_region_initfn,
2852 .instance_finalize = memory_region_finalize,
2853};
2854
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002855static const TypeInfo iommu_memory_region_info = {
2856 .parent = TYPE_MEMORY_REGION,
2857 .name = TYPE_IOMMU_MEMORY_REGION,
2858 .instance_size = sizeof(IOMMUMemoryRegion),
2859 .instance_init = iommu_memory_region_initfn,
2860};
2861
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002862static void memory_register_types(void)
2863{
2864 type_register_static(&memory_region_info);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002865 type_register_static(&iommu_memory_region_info);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002866}
2867
2868type_init(memory_register_types)