/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qemu/bitops.h"
#include "qom/object.h"
#include "trace.h"
#include <assert.h>

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

/* flat_view_mutex is taken around reading as->current_map; the critical
 * section is extremely short, so I'm using a single mutex for every AS.
 * We could also RCU for the read-side.
 *
 * The BQL is taken around transaction commits, hence both locks are taken
 * while writing to as->current_map (with the BQL taken outside).
 */
static QemuMutex flat_view_mutex;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static void memory_init(void)
{
    qemu_mutex_init(&flat_view_mutex);
}

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
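
/*
 * Illustrative sketch (not used by the code): the helpers above treat a range
 * as the half-open interval [start, start + size).  Intersecting
 * [0x1000, 0x3000) with [0x2000, 0x4000) would look roughly like:
 *
 *   AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x2000));
 *   AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x2000));
 *   if (addrrange_intersects(a, b)) {
 *       AddrRange c = addrrange_intersection(a, b); // start 0x2000, size 0x1000
 *   }
 */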

enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = (fr)->addr.size,                                        \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
    }))
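
/*
 * Usage sketch: elsewhere in this file the macros above are invoked with a
 * FlatRange and an AddressSpace, for example:
 *
 *   MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, region_add);
 *
 * which builds a temporary MemoryRegionSection from the FlatRange and calls
 * the region_add callback of every listener matching that address space, in
 * forward registration order.
 */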

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}
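
/*
 * A FlatView starts with ref == 1 (see flatview_init).  Readers take an extra
 * reference via address_space_get_flatview()/flatview_ref() and drop it with
 * flatview_unref() when done; the view is destroyed once the last reference
 * goes away.  A typical reader in this file looks like:
 *
 *   FlatView *view = address_space_get_flatview(as);
 *   FOR_EACH_FLAT_RANGE(fr, view) {
 *       ...
 *   }
 *   flatview_unref(view);
 */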

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
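
/*
 * Example: if a view contains two FlatRanges backed by the same MemoryRegion,
 * [0x0, 0x1000) at region offset 0 and [0x1000, 0x2000) at region offset
 * 0x1000, with identical dirty/romd/readonly attributes, can_merge() accepts
 * them and flatview_simplify() collapses them into a single [0x0, 0x2000)
 * range.
 */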

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static void memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_read_accessor(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = mr->ops->read(mr->opaque, addr, size);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                 hwaddr addr,
                                                 uint64_t *value,
                                                 unsigned size,
                                                 unsigned shift,
                                                 uint64_t mask)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
}

static void memory_region_write_accessor(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->write(mr->opaque, addr, tmp, size);
}

static void access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(MemoryRegion *mr,
                                                     hwaddr addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      MemoryRegion *mr)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size,
                   (size - access_size - i) * 8, access_mask);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size, i * 8, access_mask);
        }
    }
}
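
/*
 * Worked example: a 4-byte access to a region whose ops declare
 * impl.max_access_size == 2 is split into two 2-byte accesses with
 * access_mask == 0xffff.  On a little-endian target the two pieces are placed
 * at shifts 0 and 16; for a big-endian region the shifts are reversed, so the
 * assembled 64-bit value is the same either way.
 */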

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    abort();
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = mr->dirty_log_mask;
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
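
/*
 * Illustrative walk-through: rendering starts from an address space's root
 * region with a clip covering the whole 2^64 range.  Aliases are followed by
 * adjusting @base and recursing into their target, subregions are rendered
 * first (in priority order) so they obscure their parent, and only
 * terminating regions (RAM, MMIO, ROM devices) emit FlatRanges into the gaps
 * that remain in the view.
 */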

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    qemu_mutex_lock(&flat_view_mutex);
    view = as->current_map;
    flatview_ref(view);
    qemu_mutex_unlock(&flat_view_mutex);
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}


static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    qemu_mutex_lock(&flat_view_mutex);
    flatview_unref(as->current_map);
    as->current_map = new_view;
    qemu_mutex_unlock(&flat_view_mutex);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}
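
/*
 * The two passes above follow the same symmetric-difference pattern as
 * address_space_add_del_ioeventfds(): the first pass (adding == false) tears
 * down ranges that disappeared, the second (adding == true) announces ranges
 * that are new or changed, so listeners always see deletions before additions
 * within a single commit.
 */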

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}
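
/*
 * Usage sketch (hypothetical device code, not part of this file): callers
 * bracket a batch of layout changes so the flat views and listeners are only
 * updated once:
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_readonly(&s->rom, true);
 *   memory_region_set_enabled(&s->bar, false);
 *   memory_region_transaction_commit();
 *
 * Nested begin/commit pairs are allowed; only the outermost commit triggers
 * the topology update, thanks to the depth counter above.
 */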

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_alias(MemoryRegion *mr)
{
    memory_region_unref(mr->alias);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    mr->ops = &unassigned_mem_ops;
    mr->opaque = NULL;
    mr->owner = owner;
    mr->iommu_ops = NULL;
    mr->container = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->romd_mode = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
    mr->flush_coalesced_mmio = false;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             hwaddr addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (mr->ops->read) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_read_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_read_accessor, mr);
    }

    return data;
}

static bool memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return true;
    }

    *pval = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, pval, size);
    return false;
}

static bool memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return true;
    }

    adjust_endianness(mr, &data, size);

    if (mr->ops->write) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_write_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_write_accessor, mr);
    }
    return false;
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->ram_addr = ~(ram_addr_t)0;
}
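
/*
 * Usage sketch (hypothetical device, not part of this file): an MMIO region
 * is typically created during device init and then mapped by bus or board
 * code:
 *
 *   memory_region_init_io(&s->mmio, OBJECT(s), &my_dev_ops, s,
 *                         "my-dev-mmio", 0x1000);
 *   memory_region_add_subregion(system_memory, 0xfe000000, &s->mmio);
 *
 * where my_dev_ops supplies .read/.write callbacks plus optional .valid and
 * .impl constraints consumed by memory_region_access_valid() and
 * access_with_adjusted_size() above.
 */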

void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    memory_region_ref(orig);
    mr->destructor = memory_region_destructor_alias;
    mr->alias = orig;
    mr->alias_offset = offset;
}
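
/*
 * Example (hypothetical): an alias exposes a window into another region, so a
 * 1 MiB view of guest RAM starting at offset 0xc0000 could be created with:
 *
 *   memory_region_init_alias(&s->ram_window, OBJECT(s), "ram-window",
 *                            ram_mr, 0xc0000, 0x100000);
 *
 * Accesses to the alias are redirected to ram_mr at alias_offset by
 * render_memory_region() above; the alias itself never terminates.
 */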

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    notifier_list_init(&mr->iommu_notify);
}

void memory_region_init_reservation(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size)
{
    memory_region_init_io(mr, owner, &unassigned_mem_ops, mr, name, size);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    assert(memory_region_transaction_depth == 0);
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    return mr->owner;
}

void memory_region_ref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}
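
/*
 * memory_region_ref()/unref() pin the region's owner object rather than the
 * MemoryRegion itself, so code that keeps a MemoryRegion pointer beyond the
 * current topology (for example a FlatView, see flatview_insert() above)
 * takes a reference to keep the owning device from disappearing underneath
 * it.
 */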

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

bool memory_region_is_iommu(MemoryRegion *mr)
{
    return mr->iommu_ops;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    notifier_list_add(&mr->iommu_notify, n);
}

void memory_region_unregister_iommu_notifier(Notifier *n)
{
    notifier_remove(n);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    assert(memory_region_is_iommu(mr));
    notifier_list_notify(&mr->iommu_notify, &entry);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size);
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    bool ret;
    assert(mr->terminates);
    ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
    if (ret) {
        cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
    }
    return ret;
}


void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}
1331
1332void memory_region_clear_coalescing(MemoryRegion *mr)
1333{
1334 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08001335 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001336
Jan Kiszkad4105152012-08-23 13:02:29 +02001337 qemu_flush_coalesced_mmio_buffer();
1338 mr->flush_coalesced_mmio = false;
1339
Avi Kivity093bc2c2011-07-26 14:26:01 +03001340 while (!QTAILQ_EMPTY(&mr->coalesced)) {
1341 cmr = QTAILQ_FIRST(&mr->coalesced);
1342 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05001343 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08001344 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001345 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08001346
1347 if (updated) {
1348 memory_region_update_coalesced_range(mr);
1349 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001350}
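
/*
 * Usage sketch (illustrative): a display adapter batching guest writes to a
 * framebuffer-style MMIO window. Coalesced writes are buffered by the
 * accelerator and replayed at the next flush, which is why
 * memory_region_add_coalescing() also turns flush-on-access on above.
 */
static void example_set_fb_coalescing(MemoryRegion *fb_mmio, uint64_t fb_size,
                                      bool enable)
{
    if (enable) {
        memory_region_add_coalescing(fb_mmio, 0, fb_size);
    } else {
        memory_region_clear_coalescing(fb_mmio);
    }
}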
1351
Jan Kiszkad4105152012-08-23 13:02:29 +02001352void memory_region_set_flush_coalesced(MemoryRegion *mr)
1353{
1354 mr->flush_coalesced_mmio = true;
1355}
1356
1357void memory_region_clear_flush_coalesced(MemoryRegion *mr)
1358{
1359 qemu_flush_coalesced_mmio_buffer();
                /* coalesced ranges force flush-on-access (memory_region_add_coalescing()
                 * calls memory_region_set_flush_coalesced()), so the flag can only be
                 * dropped once no coalesced range remains */
1360    if (QTAILQ_EMPTY(&mr->coalesced)) {
1361 mr->flush_coalesced_mmio = false;
1362 }
1363}
1364
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001365void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001366 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001367 unsigned size,
1368 bool match_data,
1369 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02001370 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001371{
1372 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02001373 .addr.start = int128_make64(addr),
1374 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001375 .match_data = match_data,
1376 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02001377 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001378 };
1379 unsigned i;
1380
Alexander Graf28f362b2012-10-15 20:30:28 +02001381 adjust_endianness(mr, &mrfd.data, size);
Jan Kiszka59023ef2012-08-23 13:02:30 +02001382 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001383 for (i = 0; i < mr->ioeventfd_nb; ++i) {
1384 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
1385 break;
1386 }
1387 }
1388 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05001389 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001390 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
1391 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
1392 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
1393 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08001394 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001395 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001396}
1397
1398void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001399 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001400 unsigned size,
1401 bool match_data,
1402 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02001403 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001404{
1405 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02001406 .addr.start = int128_make64(addr),
1407 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001408 .match_data = match_data,
1409 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02001410 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001411 };
1412 unsigned i;
1413
Alexander Graf28f362b2012-10-15 20:30:28 +02001414 adjust_endianness(mr, &mrfd.data, size);
Jan Kiszka59023ef2012-08-23 13:02:30 +02001415 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001416 for (i = 0; i < mr->ioeventfd_nb; ++i) {
1417 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
1418 break;
1419 }
1420 }
1421 assert(i != mr->ioeventfd_nb);
1422 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
1423 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
1424 --mr->ioeventfd_nb;
                                   /* note: the "+ 1" keeps the allocation non-empty even when the
                                    * last ioeventfd is removed and ioeventfd_nb drops to zero */
Anthony Liguori7267c092011-08-20 22:09:37 -05001425    mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001426                                 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08001427 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001428 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03001429}
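
/*
 * Usage sketch (illustrative offsets and values): a virtio-style doorbell.
 * With match_data set, only 2-byte writes of the given value are converted
 * into eventfd signals. Note that the removal call has to repeat the exact
 * parameters of the add; the assertion above relies on an exact match.
 */
static void example_wire_doorbell(MemoryRegion *mmio, hwaddr doorbell_offset,
                                  uint16_t queue_index, EventNotifier *e,
                                  bool add)
{
    if (add) {
        memory_region_add_eventfd(mmio, doorbell_offset, 2, true,
                                  queue_index, e);
    } else {
        memory_region_del_eventfd(mmio, doorbell_offset, 2, true,
                                  queue_index, e);
    }
}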
1430
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001431static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001432{
Peter Crosthwaite05987012014-06-05 23:14:44 -07001433 hwaddr offset = subregion->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001434 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001435 MemoryRegion *other;
1436
Jan Kiszka59023ef2012-08-23 13:02:30 +02001437 memory_region_transaction_begin();
1438
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001439 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001440 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
1441 if (subregion->may_overlap || other->may_overlap) {
1442 continue;
1443 }
Hu Tao2c7cfd62013-03-15 14:32:01 +08001444 if (int128_ge(int128_make64(offset),
Avi Kivity08dafab2011-10-16 13:19:17 +02001445 int128_add(int128_make64(other->addr), other->size))
1446 || int128_le(int128_add(int128_make64(offset), subregion->size),
1447 int128_make64(other->addr))) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001448 continue;
1449 }
Anthony Liguoria5e1cbc2011-08-22 11:14:56 -05001450#if 0
Michael Walle860329b2011-09-15 23:16:49 +02001451 printf("warning: subregion collision %llx/%llx (%s) "
1452 "vs %llx/%llx (%s)\n",
Avi Kivity093bc2c2011-07-26 14:26:01 +03001453 (unsigned long long)offset,
Avi Kivity08dafab2011-10-16 13:19:17 +02001454 (unsigned long long)int128_get64(subregion->size),
Michael Walle860329b2011-09-15 23:16:49 +02001455 subregion->name,
1456 (unsigned long long)other->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02001457 (unsigned long long)int128_get64(other->size),
Michael Walle860329b2011-09-15 23:16:49 +02001458 other->name);
Anthony Liguoria5e1cbc2011-08-22 11:14:56 -05001459#endif
Avi Kivity093bc2c2011-07-26 14:26:01 +03001460 }
1461 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
1462 if (subregion->priority >= other->priority) {
1463 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
1464 goto done;
1465 }
1466 }
1467 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
1468done:
Jan Kiszka22bde712012-11-05 16:45:56 +01001469 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001470 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001471}
1472
Peter Crosthwaite05987012014-06-05 23:14:44 -07001473static void memory_region_add_subregion_common(MemoryRegion *mr,
1474 hwaddr offset,
1475 MemoryRegion *subregion)
1476{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001477 assert(!subregion->container);
1478 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07001479 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001480 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07001481}
Avi Kivity093bc2c2011-07-26 14:26:01 +03001482
1483void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001484 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001485 MemoryRegion *subregion)
1486{
1487 subregion->may_overlap = false;
1488 subregion->priority = 0;
1489 memory_region_add_subregion_common(mr, offset, subregion);
1490}
1491
1492void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001493 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001494 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03001495 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001496{
1497 subregion->may_overlap = true;
1498 subregion->priority = priority;
1499 memory_region_add_subregion_common(mr, offset, subregion);
1500}
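
/*
 * Usage sketch (illustrative names and sizes): a 1 MB container holding a
 * RAM background mapping with a small MMIO hole layered on top of it.
 * Where the two overlap, the hole wins because it has the higher priority.
 */
static MemoryRegion example_container;

static void example_build_layout(MemoryRegion *ram_1m, MemoryRegion *mmio_hole)
{
    memory_region_init(&example_container, NULL, "example-container",
                       0x100000);

    memory_region_add_subregion_overlap(&example_container, 0, ram_1m, 0);
    memory_region_add_subregion_overlap(&example_container, 0xe0000,
                                        mmio_hole, 1);
}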
1501
1502void memory_region_del_subregion(MemoryRegion *mr,
1503 MemoryRegion *subregion)
1504{
Jan Kiszka59023ef2012-08-23 13:02:30 +02001505 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001506 assert(subregion->container == mr);
1507 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001508 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001509 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01001510 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001511 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03001512}
1513
1514void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
1515{
1516 if (enabled == mr->enabled) {
1517 return;
1518 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02001519 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03001520 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01001521 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001522 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001523}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03001524
Peter Crosthwaite67891b82014-06-05 23:15:18 -07001525static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03001526{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001527 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03001528
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001529 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07001530 memory_region_transaction_begin();
                /* take a temporary reference: memory_region_del_subregion() below
                 * drops the container's reference, which might otherwise be the
                 * last one keeping mr (or its owner) alive */
1531        memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001532 memory_region_del_subregion(container, mr);
1533 mr->container = container;
1534 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07001535 memory_region_unref(mr);
1536 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03001537 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07001538}
Avi Kivity2282e1a2011-09-14 12:10:12 +03001539
Peter Crosthwaite67891b82014-06-05 23:15:18 -07001540void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
1541{
1542 if (addr != mr->addr) {
1543 mr->addr = addr;
1544 memory_region_readd_subregion(mr);
1545 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03001546}
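
/*
 * Usage sketch (illustrative): remapping a PCI-BAR-like window when the
 * guest reprograms it. Wrapping both updates in one transaction makes the
 * flat views get rebuilt once instead of twice.
 */
static void example_update_bar(MemoryRegion *bar_mr, hwaddr new_base,
                               bool mapped)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(bar_mr, mapped);
    memory_region_set_address(bar_mr, new_base);
    memory_region_transaction_commit();
}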
1547
Avi Kivitya8170e52012-10-23 12:30:10 +02001548void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02001549{
Avi Kivity47033592011-12-04 19:16:50 +02001550 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02001551
Jan Kiszka59023ef2012-08-23 13:02:30 +02001552 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02001553 return;
1554 }
1555
Jan Kiszka59023ef2012-08-23 13:02:30 +02001556 memory_region_transaction_begin();
1557 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01001558 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001559 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02001560}
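
/*
 * Usage sketch (illustrative names): classic bank switching. A fixed 8 KB
 * window is an alias into a larger backing region; selecting a bank just
 * moves the alias offset.
 */
static MemoryRegion example_bank_window;

static void example_init_bank_window(Object *owner, MemoryRegion *backing)
{
    memory_region_init_alias(&example_bank_window, owner, "bank-window",
                             backing, 0, 8 * 1024);
}

static void example_select_bank(unsigned bank)
{
    memory_region_set_alias_offset(&example_bank_window,
                                   (hwaddr)bank * 8 * 1024);
}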
1561
Avi Kivitye34911c2011-12-19 12:06:23 +02001562ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
1563{
Avi Kivitye34911c2011-12-19 12:06:23 +02001564 return mr->ram_addr;
1565}
1566
Avi Kivitye2177952011-12-08 15:00:18 +02001567static int cmp_flatrange_addr(const void *addr_, const void *fr_)
1568{
1569 const AddrRange *addr = addr_;
1570 const FlatRange *fr = fr_;
1571
1572 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
1573 return -1;
1574 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
1575 return 1;
1576 }
1577 return 0;
1578}
1579
Paolo Bonzini99e86342013-05-06 10:26:13 +02001580static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02001581{
Paolo Bonzini99e86342013-05-06 10:26:13 +02001582 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02001583 sizeof(FlatRange), cmp_flatrange_addr);
1584}
1585
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001586bool memory_region_present(MemoryRegion *container, hwaddr addr)
Paolo Bonzini3ce10902013-07-02 13:40:48 +02001587{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001588 MemoryRegion *mr = memory_region_find(container, addr, 1).mr;
1589 if (!mr || (mr == container)) {
Paolo Bonzini3ce10902013-07-02 13:40:48 +02001590 return false;
1591 }
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001592 memory_region_unref(mr);
Paolo Bonzini3ce10902013-07-02 13:40:48 +02001593 return true;
1594}
1595
Paolo Bonzini73034e92013-05-07 15:48:28 +02001596MemoryRegionSection memory_region_find(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001597 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02001598{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001599 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02001600 MemoryRegion *root;
1601 AddressSpace *as;
1602 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02001603 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02001604 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02001605
Paolo Bonzini73034e92013-05-07 15:48:28 +02001606 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001607 for (root = mr; root->container; ) {
1608 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02001609 addr += root->addr;
1610 }
1611
1612 as = memory_region_to_address_space(root);
1613 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02001614
Paolo Bonzini856d7242013-05-06 11:57:21 +02001615 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001616 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02001617 if (!fr) {
Marcel Apfelbaum6307d972013-12-02 16:20:59 +02001618 flatview_unref(view);
Avi Kivitye2177952011-12-08 15:00:18 +02001619 return ret;
1620 }
1621
Paolo Bonzini99e86342013-05-06 10:26:13 +02001622 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02001623 --fr;
1624 }
1625
1626 ret.mr = fr->mr;
Paolo Bonzini73034e92013-05-07 15:48:28 +02001627 ret.address_space = as;
Avi Kivitye2177952011-12-08 15:00:18 +02001628 range = addrrange_intersection(range, fr->addr);
1629 ret.offset_within_region = fr->offset_in_region;
1630 ret.offset_within_region += int128_get64(int128_sub(range.start,
1631 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001632 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02001633 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02001634 ret.readonly = fr->readonly;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001635 memory_region_ref(ret.mr);
1636
Paolo Bonzini856d7242013-05-06 11:57:21 +02001637 flatview_unref(view);
Avi Kivitye2177952011-12-08 15:00:18 +02001638 return ret;
1639}
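
/*
 * Usage sketch (illustrative): probing what backs a guest-physical address.
 * memory_region_find() returns a referenced region (see the
 * memory_region_ref() call above), so the caller must drop that reference.
 */
static bool example_addr_is_ram(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection section = memory_region_find(root, addr, 1);
    bool is_ram;

    if (!section.mr) {
        return false;
    }
    is_ram = memory_region_is_ram(section.mr);
    memory_region_unref(section.mr);
    return is_ram;
}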
1640
Paolo Bonzini1d671362013-04-24 10:46:55 +02001641void address_space_sync_dirty_bitmap(AddressSpace *as)
Avi Kivity86e775c2011-12-15 16:24:49 +02001642{
Paolo Bonzini99e86342013-05-06 10:26:13 +02001643 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02001644 FlatRange *fr;
1645
Paolo Bonzini856d7242013-05-06 11:57:21 +02001646 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001647 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity72e22d22012-02-08 15:05:50 +02001648 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
Avi Kivity7664e802011-12-11 14:47:25 +02001649 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001650 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02001651}
1652
1653void memory_global_dirty_log_start(void)
1654{
Avi Kivity7664e802011-12-11 14:47:25 +02001655 global_dirty_log = true;
Avi Kivity7376e582012-02-08 21:05:17 +02001656 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Avi Kivity7664e802011-12-11 14:47:25 +02001657}
1658
1659void memory_global_dirty_log_stop(void)
1660{
Avi Kivity7664e802011-12-11 14:47:25 +02001661 global_dirty_log = false;
Avi Kivity7376e582012-02-08 21:05:17 +02001662 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02001663}
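
/*
 * Usage sketch (illustrative, heavily simplified): the migration-style
 * cycle of enabling global dirty logging, syncing the bitmap from the
 * accelerator, walking a RAM region page by page and clearing the bits
 * that were consumed. Real migration code operates on the ram_addr_t
 * bitmap directly; this only shows how the entry points fit together.
 */
static void example_dirty_pass(AddressSpace *as, MemoryRegion *ram_mr,
                               hwaddr ram_size)
{
    hwaddr addr;

    memory_global_dirty_log_start();
    /* ... let the guest run for a while ... */
    address_space_sync_dirty_bitmap(as);

    for (addr = 0; addr < ram_size; addr += TARGET_PAGE_SIZE) {
        if (memory_region_get_dirty(ram_mr, addr, TARGET_PAGE_SIZE,
                                    DIRTY_MEMORY_MIGRATION)) {
            /* copy the page out here, then forget that it was dirty */
            memory_region_reset_dirty(ram_mr, addr, TARGET_PAGE_SIZE,
                                      DIRTY_MEMORY_MIGRATION);
        }
    }
    memory_global_dirty_log_stop();
}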
1664
1665static void listener_add_address_space(MemoryListener *listener,
1666 AddressSpace *as)
1667{
Paolo Bonzini99e86342013-05-06 10:26:13 +02001668 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02001669 FlatRange *fr;
1670
Julien Grall221b3a32012-04-04 15:15:41 +01001671 if (listener->address_space_filter
Avi Kivityf6790af2012-10-02 20:13:51 +02001672 && listener->address_space_filter != as) {
Julien Grall221b3a32012-04-04 15:15:41 +01001673 return;
1674 }
1675
Avi Kivity7664e802011-12-11 14:47:25 +02001676 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02001677 if (listener->log_global_start) {
1678 listener->log_global_start(listener);
1679 }
Avi Kivity7664e802011-12-11 14:47:25 +02001680 }
Avi Kivity975aefe2012-10-02 16:39:57 +02001681
Paolo Bonzini856d7242013-05-06 11:57:21 +02001682 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001683 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity7664e802011-12-11 14:47:25 +02001684 MemoryRegionSection section = {
1685 .mr = fr->mr,
Avi Kivityf6790af2012-10-02 20:13:51 +02001686 .address_space = as,
Avi Kivity7664e802011-12-11 14:47:25 +02001687 .offset_within_region = fr->offset_in_region,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001688 .size = fr->addr.size,
Avi Kivity7664e802011-12-11 14:47:25 +02001689 .offset_within_address_space = int128_get64(fr->addr.start),
Avi Kivity7a8499e2012-02-08 17:01:23 +02001690 .readonly = fr->readonly,
Avi Kivity7664e802011-12-11 14:47:25 +02001691 };
Avi Kivity975aefe2012-10-02 16:39:57 +02001692 if (listener->region_add) {
1693 listener->region_add(listener, &section);
1694 }
Avi Kivity7664e802011-12-11 14:47:25 +02001695 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001696 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02001697}
1698
Avi Kivityf6790af2012-10-02 20:13:51 +02001699void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
Avi Kivity7664e802011-12-11 14:47:25 +02001700{
Avi Kivity72e22d22012-02-08 15:05:50 +02001701 MemoryListener *other = NULL;
Avi Kivity0d673e32012-10-02 15:28:50 +02001702 AddressSpace *as;
Avi Kivity72e22d22012-02-08 15:05:50 +02001703
Avi Kivity7376e582012-02-08 21:05:17 +02001704 listener->address_space_filter = filter;
Avi Kivity72e22d22012-02-08 15:05:50 +02001705 if (QTAILQ_EMPTY(&memory_listeners)
1706 || listener->priority >= QTAILQ_LAST(&memory_listeners,
1707 memory_listeners)->priority) {
1708 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
1709 } else {
1710 QTAILQ_FOREACH(other, &memory_listeners, link) {
1711 if (listener->priority < other->priority) {
1712 break;
1713 }
1714 }
1715 QTAILQ_INSERT_BEFORE(other, listener, link);
1716 }
Avi Kivity0d673e32012-10-02 15:28:50 +02001717
1718 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1719 listener_add_address_space(listener, as);
1720 }
Avi Kivity7664e802011-12-11 14:47:25 +02001721}
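
/*
 * Usage sketch (illustrative; printf and the PRIx macros are assumed to be
 * available through the usual QEMU headers): a listener that logs every
 * range mapped into or out of one address space. section->size is an
 * Int128, hence int128_get64(); listeners are kept sorted by ascending
 * priority, as the insertion loop above shows.
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    printf("map   %s @ 0x%" HWADDR_PRIx " + 0x%" PRIx64 "\n",
           section->mr->name,
           section->offset_within_address_space,
           int128_get64(section->size));
}

static void example_region_del(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    printf("unmap %s @ 0x%" HWADDR_PRIx "\n",
           section->mr->name,
           section->offset_within_address_space);
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .region_del = example_region_del,
    .priority = 10,
};

static void example_watch_address_space(AddressSpace *as)
{
    memory_listener_register(&example_listener, as);
}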
1722
1723void memory_listener_unregister(MemoryListener *listener)
1724{
Avi Kivity72e22d22012-02-08 15:05:50 +02001725 QTAILQ_REMOVE(&memory_listeners, listener, link);
Avi Kivity86e775c2011-12-15 16:24:49 +02001726}
Avi Kivitye2177952011-12-08 15:00:18 +02001727
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001728void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03001729{
Paolo Bonzini856d7242013-05-06 11:57:21 +02001730 if (QTAILQ_EMPTY(&address_spaces)) {
1731 memory_init();
1732 }
1733
Jan Kiszka59023ef2012-08-23 13:02:30 +02001734 memory_region_transaction_begin();
Avi Kivity8786db72012-10-02 13:53:41 +02001735 as->root = root;
1736 as->current_map = g_new(FlatView, 1);
1737 flatview_init(as->current_map);
Avi Kivity4c19eb72012-10-30 13:47:44 +02001738 as->ioeventfd_nb = 0;
1739 as->ioeventfds = NULL;
Avi Kivity0d673e32012-10-02 15:28:50 +02001740 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001741 as->name = g_strdup(name ? name : "anonymous");
Avi Kivityac1970f2012-10-03 16:22:53 +02001742 address_space_init_dispatch(as);
Paolo Bonzinif43793c2013-04-16 15:39:51 +02001743 memory_region_update_pending |= root->enabled;
1744 memory_region_transaction_commit();
Avi Kivity1c0ffa52011-07-26 14:26:04 +03001745}
Avi Kivity658b2222011-07-26 14:26:08 +03001746
Avi Kivity83f3c252012-10-07 12:59:55 +02001747void address_space_destroy(AddressSpace *as)
1748{
David Gibson078c44f2014-05-30 12:59:00 -06001749 MemoryListener *listener;
1750
Avi Kivity83f3c252012-10-07 12:59:55 +02001751 /* Flush out anything from MemoryListeners listening in on this */
1752 memory_region_transaction_begin();
1753 as->root = NULL;
1754 memory_region_transaction_commit();
1755 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
1756 address_space_destroy_dispatch(as);
David Gibson078c44f2014-05-30 12:59:00 -06001757
1758 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1759 assert(listener->address_space_filter != as);
1760 }
1761
Paolo Bonzini856d7242013-05-06 11:57:21 +02001762 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001763 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02001764 g_free(as->ioeventfds);
Avi Kivity83f3c252012-10-07 12:59:55 +02001765}
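
/*
 * Usage sketch (illustrative): giving a DMA-capable device its own named
 * view of memory. The name shows up in debug output such as the mtree
 * dump below.
 */
static AddressSpace example_dma_as;

static void example_create_dma_view(MemoryRegion *dma_root)
{
    address_space_init(&example_dma_as, dma_root, "example-dma");
}

static void example_drop_dma_view(void)
{
    address_space_destroy(&example_dma_as);
}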
1766
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001767bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size)
Avi Kivityacbbec52011-11-21 12:27:03 +02001768{
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001769 return memory_region_dispatch_read(mr, addr, pval, size);
Avi Kivityacbbec52011-11-21 12:27:03 +02001770}
1771
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001772bool io_mem_write(MemoryRegion *mr, hwaddr addr,
Avi Kivityacbbec52011-11-21 12:27:03 +02001773 uint64_t val, unsigned size)
1774{
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001775 return memory_region_dispatch_write(mr, addr, val, size);
Avi Kivityacbbec52011-11-21 12:27:03 +02001776}
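
/*
 * Usage sketch (illustrative): dispatching a read directly to a region's
 * callbacks, much as the exec layer does for MMIO accesses. A true return
 * value reports that the access was rejected as invalid.
 */
static uint32_t example_mmio_read32(MemoryRegion *mr, hwaddr offset)
{
    uint64_t val = 0;

    if (io_mem_read(mr, offset, &val, 4)) {
        return 0xffffffff;
    }
    return (uint32_t)val;
}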
1777
Blue Swirl314e2982011-09-11 20:22:05 +00001778typedef struct MemoryRegionList MemoryRegionList;
1779
1780struct MemoryRegionList {
1781 const MemoryRegion *mr;
1782 bool printed;
1783 QTAILQ_ENTRY(MemoryRegionList) queue;
1784};
1785
1786typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
1787
1788static void mtree_print_mr(fprintf_function mon_printf, void *f,
1789 const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02001790 hwaddr base,
Jan Kiszka9479c572011-09-27 15:00:41 +02001791 MemoryRegionListHead *alias_print_queue)
Blue Swirl314e2982011-09-11 20:22:05 +00001792{
Jan Kiszka9479c572011-09-27 15:00:41 +02001793 MemoryRegionList *new_ml, *ml, *next_ml;
1794 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00001795 const MemoryRegion *submr;
1796 unsigned int i;
1797
Jan Kiszka7ea692b2012-10-31 10:49:02 +01001798 if (!mr || !mr->enabled) {
Blue Swirl314e2982011-09-11 20:22:05 +00001799 return;
1800 }
1801
1802 for (i = 0; i < level; i++) {
1803 mon_printf(f, " ");
1804 }
1805
1806 if (mr->alias) {
1807 MemoryRegionList *ml;
1808 bool found = false;
1809
1810 /* check if the alias is already in the queue */
Jan Kiszka9479c572011-09-27 15:00:41 +02001811 QTAILQ_FOREACH(ml, alias_print_queue, queue) {
Blue Swirl314e2982011-09-11 20:22:05 +00001812 if (ml->mr == mr->alias && !ml->printed) {
1813 found = true;
1814 }
1815 }
1816
1817 if (!found) {
1818 ml = g_new(MemoryRegionList, 1);
1819 ml->mr = mr->alias;
1820 ml->printed = false;
Jan Kiszka9479c572011-09-27 15:00:41 +02001821 QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
Blue Swirl314e2982011-09-11 20:22:05 +00001822 }
Jan Kiszka4896d742012-02-04 16:25:42 +01001823 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
1824 " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
1825 "-" TARGET_FMT_plx "\n",
Blue Swirl314e2982011-09-11 20:22:05 +00001826 base + mr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02001827 base + mr->addr
Alex Williamsonfd1d9922013-07-19 12:42:12 -06001828 + (int128_nz(mr->size) ?
1829 (hwaddr)int128_get64(int128_sub(mr->size,
1830 int128_one())) : 0),
Jan Kiszka4b474ba2011-09-27 15:00:31 +02001831 mr->priority,
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001832 mr->romd_mode ? 'R' : '-',
1833 !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
1834 : '-',
Blue Swirl314e2982011-09-11 20:22:05 +00001835 mr->name,
1836 mr->alias->name,
1837 mr->alias_offset,
Avi Kivity08dafab2011-10-16 13:19:17 +02001838 mr->alias_offset
Alexey Kardashevskiya66670c2013-08-30 18:10:38 +10001839 + (int128_nz(mr->size) ?
1840 (hwaddr)int128_get64(int128_sub(mr->size,
1841 int128_one())) : 0));
Blue Swirl314e2982011-09-11 20:22:05 +00001842 } else {
Jan Kiszka4896d742012-02-04 16:25:42 +01001843 mon_printf(f,
1844 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
Blue Swirl314e2982011-09-11 20:22:05 +00001845 base + mr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02001846 base + mr->addr
Alex Williamsonfd1d9922013-07-19 12:42:12 -06001847 + (int128_nz(mr->size) ?
1848 (hwaddr)int128_get64(int128_sub(mr->size,
1849 int128_one())) : 0),
Jan Kiszka4b474ba2011-09-27 15:00:31 +02001850 mr->priority,
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001851 mr->romd_mode ? 'R' : '-',
1852 !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
1853 : '-',
Blue Swirl314e2982011-09-11 20:22:05 +00001854 mr->name);
1855 }
Jan Kiszka9479c572011-09-27 15:00:41 +02001856
1857 QTAILQ_INIT(&submr_print_queue);
1858
Blue Swirl314e2982011-09-11 20:22:05 +00001859 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02001860 new_ml = g_new(MemoryRegionList, 1);
1861 new_ml->mr = submr;
1862 QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
1863 if (new_ml->mr->addr < ml->mr->addr ||
1864 (new_ml->mr->addr == ml->mr->addr &&
1865 new_ml->mr->priority > ml->mr->priority)) {
1866 QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
1867 new_ml = NULL;
1868 break;
1869 }
1870 }
1871 if (new_ml) {
1872 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
1873 }
1874 }
1875
1876 QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
1877 mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
1878 alias_print_queue);
1879 }
1880
Avi Kivity88365e42011-11-13 12:00:55 +02001881 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02001882 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00001883 }
1884}
1885
1886void mtree_info(fprintf_function mon_printf, void *f)
1887{
1888 MemoryRegionListHead ml_head;
1889 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02001890 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00001891
1892 QTAILQ_INIT(&ml_head);
1893
Avi Kivity0d673e32012-10-02 15:28:50 +02001894 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Avi Kivity0d673e32012-10-02 15:28:50 +02001895 mon_printf(f, "%s\n", as->name);
1896 mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head);
Blue Swirlb9f9be82012-03-10 16:58:35 +00001897 }
1898
1899 mon_printf(f, "aliases\n");
Blue Swirl314e2982011-09-11 20:22:05 +00001900 /* print aliased regions */
1901 QTAILQ_FOREACH(ml, &ml_head, queue) {
1902 if (!ml->printed) {
1903 mon_printf(f, "%s\n", ml->mr->name);
1904 mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
1905 }
1906 }
1907
1908 QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02001909 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00001910 }
Blue Swirl314e2982011-09-11 20:22:05 +00001911}
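
/*
 * Usage sketch (illustrative): the monitor's "info mtree" command feeds
 * mtree_info() with monitor_printf; for ad-hoc debugging the same dump can
 * go to stderr, assuming plain fprintf is compatible with fprintf_function.
 */
static void example_dump_mtree_to_stderr(void)
{
    mtree_info((fprintf_function)fprintf, stderr);
}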