blob: 16784941b5542a95b4f784a6a6555de474bfc585 [file] [log] [blame]
Avi Kivity093bc2c2011-07-26 14:26:01 +03001/*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14#ifndef MEMORY_H
15#define MEMORY_H
16
17#ifndef CONFIG_USER_ONLY
18
Juan Quintela1ab4c8c2013-10-08 16:14:39 +020019#define DIRTY_MEMORY_VGA 0
20#define DIRTY_MEMORY_CODE 1
21#define DIRTY_MEMORY_MIGRATION 2
22#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
23
Paolo Bonzini022c62c2012-12-17 18:19:49 +010024#include "exec/cpu-common.h"
Andreas Färberce927ed2013-05-28 14:02:38 +020025#ifndef CONFIG_USER_ONLY
Paolo Bonzini022c62c2012-12-17 18:19:49 +010026#include "exec/hwaddr.h"
Andreas Färberce927ed2013-05-28 14:02:38 +020027#endif
Peter Maydellcc05c432015-04-26 16:49:23 +010028#include "exec/memattrs.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010029#include "qemu/queue.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010030#include "qemu/int128.h"
David Gibson06866572013-05-14 19:13:56 +100031#include "qemu/notify.h"
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -070032#include "qom/object.h"
Paolo Bonzini374f2982013-05-17 12:37:03 +020033#include "qemu/rcu.h"
Avi Kivity093bc2c2011-07-26 14:26:01 +030034
Paolo Bonzini052e87b2013-05-27 10:08:27 +020035#define MAX_PHYS_ADDR_SPACE_BITS 62
36#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
37
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -070038#define TYPE_MEMORY_REGION "qemu:memory-region"
39#define MEMORY_REGION(obj) \
40 OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
41
Avi Kivity093bc2c2011-07-26 14:26:01 +030042typedef struct MemoryRegionOps MemoryRegionOps;
Avi Kivity74901c32011-07-26 14:26:10 +030043typedef struct MemoryRegionMmio MemoryRegionMmio;
Avi Kivity093bc2c2011-07-26 14:26:01 +030044
/*
 * Legacy ("old_mmio") callback table: one read and one write callback per
 * access size.  NOTE(review): the three slots presumably correspond to 1-,
 * 2- and 4-byte accesses - confirm against the old_mmio dispatch code.
 * Used only via MemoryRegionOps.old_mmio when .read/.write are absent.
 */
struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};
49
Avi Kivity30951152012-10-30 13:47:46 +020050typedef struct IOMMUTLBEntry IOMMUTLBEntry;
51
/* See address_space_translate: bit 0 is read, bit 1 is write. */
typedef enum {
    IOMMU_NONE = 0,     /* no access permitted */
    IOMMU_RO = 1,       /* read-only */
    IOMMU_WO = 2,       /* write-only */
    IOMMU_RW = 3,       /* read and write */
} IOMMUAccessFlags;
59
/*
 * One IOMMU translation entry: maps an I/O virtual address range to
 * @translated_addr inside @target_as.  A deleted entry is signalled by
 * @perm == IOMMU_NONE (see memory_region_notify_iommu()).
 */
struct IOMMUTLBEntry {
    AddressSpace    *target_as;       /* address space the access resolves into */
    hwaddr           iova;            /* I/O virtual address of the mapping */
    hwaddr           translated_addr; /* corresponding address in @target_as */
    hwaddr           addr_mask;       /* low bits covered; 0xfff = 4k translation */
    IOMMUAccessFlags perm;            /* allowed access types */
};
67
Peter Maydellcc05c432015-04-26 16:49:23 +010068/* New-style MMIO accessors can indicate that the transaction failed.
69 * A zero (MEMTX_OK) response means success; anything else is a failure
70 * of some kind. The memory subsystem will bitwise-OR together results
71 * if it is synthesizing an operation from multiple smaller accesses.
72 */
73#define MEMTX_OK 0
74#define MEMTX_ERROR (1U << 0) /* device returned an error */
75#define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
76typedef uint32_t MemTxResult;
77
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    /* Attribute-carrying variants of @read/@write: they also receive the
     * transaction's MemTxAttrs and can report failure by returning a
     * nonzero MemTxResult (see the MEMTX_* flags above).
     * NOTE(review): presumably preferred over @read/@write when both are
     * set - confirm against the accessors in memory.c.
     */
    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    /* Byte order in which the device presents its data. */
    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
146
Avi Kivity30951152012-10-30 13:47:46 +0200147typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
148
/* Callbacks implemented by an IOMMU memory region (see
 * memory_region_init_iommu()). */
struct MemoryRegionIOMMUOps {
    /* Return a TLB entry that contains a given address.  @is_write
     * distinguishes read from write accesses so the IOMMU can apply
     * the matching permission check. */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
};
153
Avi Kivity093bc2c2011-07-26 14:26:01 +0300154typedef struct CoalescedMemoryRange CoalescedMemoryRange;
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300155typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
Avi Kivity093bc2c2011-07-26 14:26:01 +0300156
/*
 * A MemoryRegion represents one node of the guest physical address space:
 * RAM, MMIO, a container of subregions, an alias into another region, or
 * an IOMMU.  MemoryRegion is a QOM object (TYPE_MEMORY_REGION).
 */
struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;             /* ROM device currently allows direct reads */
    bool ram;
    bool subpage;
    bool readonly;              /* For RAM regions */
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;     /* DIRTY_MEMORY_* clients with logging enabled */
    RAMBlock *ram_block;
    Object *owner;              /* object that carries this region's refcount */
    const MemoryRegionIOMMUOps *iommu_ops;  /* non-NULL iff this is an IOMMU region */

    const MemoryRegionOps *ops; /* MMIO callbacks */
    void *opaque;               /* passed to the @ops callbacks */
    MemoryRegion *container;    /* region this one is a subregion of, or NULL */
    Int128 size;
    hwaddr addr;                /* NOTE(review): presumably the offset within
                                 * @container - confirm in memory.c */
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool skip_dump;             /* exclude contents from memory dumps */
    bool enabled;
    bool warning_printed;       /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;        /* region aliased by this one, or NULL */
    hwaddr alias_offset;        /* start of the window into @alias */
    int32_t priority;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;      /* number of entries in @ioeventfds */
    MemoryRegionIoeventfd *ioeventfds;
    NotifierList iommu_notify;  /* notifiers for IOMMU translation changes */
};
198
/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /* NOTE(review): begin/commit presumably bracket a batch of
     * region_add/region_del/region_nop calls - confirm against the
     * update loop in memory.c. */
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    /* A section appeared in / disappeared from / remained in the view. */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    /* Dirty-logging state changed for @section; @old and @new carry the
     * previous and updated client masks. */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    /* An ioeventfd was bound to / unbound from @section. */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    /* If non-NULL, only sections of this address space are reported. */
    AddressSpace *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;
};
231
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;            /* for RCU-deferred reclamation */
    char *name;                     /* debug name */
    MemoryRegion *root;             /* root of this space's region tree */
    int ref_count;
    bool malloced;                  /* whether the AS itself was heap-allocated */

    /* Accessed via RCU.  */
    struct FlatView *current_map;   /* flattened view of the @root tree */

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    /* NOTE(review): next_dispatch is presumably the table being rebuilt,
     * swapped into @dispatch on topology commit - confirm in exec.c. */
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;

    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};
254
/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};
274
Avi Kivity093bc2c2011-07-26 14:26:01 +0300275/**
276 * memory_region_init: Initialize a memory region
277 *
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300278 * The region typically acts as a container for other memory regions. Use
Avi Kivity093bc2c2011-07-26 14:26:01 +0300279 * memory_region_add_subregion() to add subregions.
280 *
281 * @mr: the #MemoryRegion to be initialized
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400282 * @owner: the object that tracks the region's reference count
Avi Kivity093bc2c2011-07-26 14:26:01 +0300283 * @name: used for debugging; not visible to the user or ABI
284 * @size: size of the region; any subregions beyond this size will be clipped
285 */
286void memory_region_init(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400287 struct Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300288 const char *name,
289 uint64_t size);
Paolo Bonzini46637be2013-05-07 09:06:00 +0200290
291/**
292 * memory_region_ref: Add 1 to a memory region's reference count
293 *
294 * Whenever memory regions are accessed outside the BQL, they need to be
295 * preserved against hot-unplug. MemoryRegions actually do not have their
296 * own reference count; they piggyback on a QOM object, their "owner".
297 * This function adds a reference to the owner.
298 *
299 * All MemoryRegions must have an owner if they can disappear, even if the
300 * device they belong to operates exclusively under the BQL. This is because
301 * the region could be returned at any time by memory_region_find, and this
302 * is usually under guest control.
303 *
304 * @mr: the #MemoryRegion
305 */
306void memory_region_ref(MemoryRegion *mr);
307
308/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
310 *
311 * Whenever memory regions are accessed outside the BQL, they need to be
312 * preserved against hot-unplug. MemoryRegions actually do not have their
313 * own reference count; they piggyback on a QOM object, their "owner".
314 * This function removes a reference to the owner and possibly destroys it.
315 *
316 * @mr: the #MemoryRegion
317 */
318void memory_region_unref(MemoryRegion *mr);
319
Avi Kivity093bc2c2011-07-26 14:26:01 +0300320/**
321 * memory_region_init_io: Initialize an I/O memory region.
322 *
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300323 * Accesses into the region will cause the callbacks in @ops to be called.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300324 * if @size is nonzero, subregions will be clipped to @size.
325 *
326 * @mr: the #MemoryRegion to be initialized.
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400327 * @owner: the object that tracks the region's reference count
Avi Kivity093bc2c2011-07-26 14:26:01 +0300328 * @ops: a structure containing read and write callbacks to be used when
329 * I/O is performed on the region.
Daniel P. Berrangeb6af0972015-08-26 12:17:13 +0100330 * @opaque: passed to the read and write callbacks of the @ops structure.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300331 * @name: used for debugging; not visible to the user or ABI
332 * @size: size of the region.
333 */
334void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400335 struct Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300336 const MemoryRegionOps *ops,
337 void *opaque,
338 const char *name,
339 uint64_t size);
340
341/**
342 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300343 * region will modify memory directly.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300344 *
345 * @mr: the #MemoryRegion to be initialized.
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400346 * @owner: the object that tracks the region's reference count
Avi Kivityc5705a72011-12-20 15:59:12 +0200347 * @name: the name of the region.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300348 * @size: size of the region.
Hu Tao49946532014-09-09 13:27:55 +0800349 * @errp: pointer to Error*, to store an error if it happens.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300350 */
351void memory_region_init_ram(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400352 struct Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300353 const char *name,
Hu Tao49946532014-09-09 13:27:55 +0800354 uint64_t size,
355 Error **errp);
Avi Kivity093bc2c2011-07-26 14:26:01 +0300356
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +0200357/**
358 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
359 * RAM. Accesses into the region will
360 * modify memory directly. Only an initial
361 * portion of this RAM is actually used.
362 * The used size can change across reboots.
363 *
364 * @mr: the #MemoryRegion to be initialized.
365 * @owner: the object that tracks the region's reference count
366 * @name: the name of the region.
367 * @size: used size of the region.
368 * @max_size: max size of the region.
369 * @resized: callback to notify owner about used size change.
370 * @errp: pointer to Error*, to store an error if it happens.
371 */
372void memory_region_init_resizeable_ram(MemoryRegion *mr,
373 struct Object *owner,
374 const char *name,
375 uint64_t size,
376 uint64_t max_size,
377 void (*resized)(const char*,
378 uint64_t length,
379 void *host),
380 Error **errp);
Paolo Bonzini0b183fc2014-05-14 17:43:19 +0800381#ifdef __linux__
382/**
383 * memory_region_init_ram_from_file: Initialize RAM memory region with a
384 * mmap-ed backend.
385 *
386 * @mr: the #MemoryRegion to be initialized.
387 * @owner: the object that tracks the region's reference count
388 * @name: the name of the region.
389 * @size: size of the region.
Paolo Bonzinidbcb8982014-06-10 19:15:24 +0800390 * @share: %true if memory must be mmaped with the MAP_SHARED flag
Paolo Bonzini0b183fc2014-05-14 17:43:19 +0800391 * @path: the path in which to allocate the RAM.
Paolo Bonzini7f56e742014-05-14 17:43:20 +0800392 * @errp: pointer to Error*, to store an error if it happens.
Paolo Bonzini0b183fc2014-05-14 17:43:19 +0800393 */
394void memory_region_init_ram_from_file(MemoryRegion *mr,
395 struct Object *owner,
396 const char *name,
397 uint64_t size,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +0800398 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +0800399 const char *path,
400 Error **errp);
Paolo Bonzini0b183fc2014-05-14 17:43:19 +0800401#endif
402
Avi Kivity093bc2c2011-07-26 14:26:01 +0300403/**
BALATON Zoltan1a7e8ca2012-08-22 17:18:38 +0200404 * memory_region_init_ram_ptr: Initialize RAM memory region from a
405 * user-provided pointer. Accesses into the
406 * region will modify memory directly.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300407 *
408 * @mr: the #MemoryRegion to be initialized.
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400409 * @owner: the object that tracks the region's reference count
Avi Kivityc5705a72011-12-20 15:59:12 +0200410 * @name: the name of the region.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300411 * @size: size of the region.
412 * @ptr: memory to be mapped; must contain at least @size bytes.
413 */
414void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400415 struct Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300416 const char *name,
417 uint64_t size,
418 void *ptr);
419
420/**
421 * memory_region_init_alias: Initialize a memory region that aliases all or a
422 * part of another memory region.
423 *
424 * @mr: the #MemoryRegion to be initialized.
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400425 * @owner: the object that tracks the region's reference count
Avi Kivity093bc2c2011-07-26 14:26:01 +0300426 * @name: used for debugging; not visible to the user or ABI
427 * @orig: the region to be referenced; @mr will be equivalent to
428 * @orig between @offset and @offset + @size - 1.
429 * @offset: start of the section in @orig to be referenced.
430 * @size: size of the region.
431 */
432void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400433 struct Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300434 const char *name,
435 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +0200436 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300437 uint64_t size);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300438
439/**
440 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
441 * handled via callbacks.
442 *
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +0100443 * If NULL callbacks pointer is given, then I/O space is not supposed to be
444 * handled by QEMU itself. Any access via the memory API will cause an abort().
445 *
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300446 * @mr: the #MemoryRegion to be initialized.
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400447 * @owner: the object that tracks the region's reference count
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300448 * @ops: callbacks for write access handling.
Avi Kivityc5705a72011-12-20 15:59:12 +0200449 * @name: the name of the region.
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300450 * @size: size of the region.
Hu Tao33e0eb52014-09-09 13:27:57 +0800451 * @errp: pointer to Error*, to store an error if it happens.
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300452 */
453void memory_region_init_rom_device(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400454 struct Object *owner,
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300455 const MemoryRegionOps *ops,
Avi Kivity75f59412011-08-26 00:35:15 +0300456 void *opaque,
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300457 const char *name,
Hu Tao33e0eb52014-09-09 13:27:57 +0800458 uint64_t size,
459 Error **errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300460
/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 * This function is deprecated.  Use memory_region_init_io() with NULL
 * callbacks instead.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
static inline void memory_region_init_reservation(MemoryRegion *mr,
                                                  Object *owner,
                                                  const char *name,
                                                  uint64_t size)
{
    /* NULL ops with @mr as the (unused) opaque: any access aborts. */
    memory_region_init_io(mr, owner, NULL, mr, name, size);
}
Avi Kivity30951152012-10-30 13:47:46 +0200483
484/**
485 * memory_region_init_iommu: Initialize a memory region that translates
486 * addresses
487 *
488 * An IOMMU region translates addresses and forwards accesses to a target
489 * memory region.
490 *
491 * @mr: the #MemoryRegion to be initialized
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400492 * @owner: the object that tracks the region's reference count
Avi Kivity30951152012-10-30 13:47:46 +0200493 * @ops: a function that translates addresses into the @target region
494 * @name: used for debugging; not visible to the user or ABI
495 * @size: size of the region.
496 */
497void memory_region_init_iommu(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -0400498 struct Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +0200499 const MemoryRegionIOMMUOps *ops,
500 const char *name,
501 uint64_t size);
502
Jan Kiszka1660e722011-10-23 16:01:19 +0200503/**
Paolo Bonzini803c0812013-05-07 06:59:09 +0200504 * memory_region_owner: get a memory region's owner.
505 *
506 * @mr: the memory region being queried.
507 */
508struct Object *memory_region_owner(MemoryRegion *mr);
509
510/**
Avi Kivity093bc2c2011-07-26 14:26:01 +0300511 * memory_region_size: get a memory region's size.
512 *
513 * @mr: the memory region being queried.
514 */
515uint64_t memory_region_size(MemoryRegion *mr);
516
/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}
Avi Kivity8ea92522011-12-08 15:58:43 +0200528
529/**
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +0530530 * memory_region_is_skip_dump: check whether a memory region should not be
531 * dumped
532 *
 * Returns %true if a memory region should not be dumped (e.g. VFIO BAR MMAP).
534 *
535 * @mr: the memory region being queried
536 */
537bool memory_region_is_skip_dump(MemoryRegion *mr);
538
539/**
540 * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory
541 * region
542 *
543 * @mr: the memory region being queried
544 */
545void memory_region_set_skip_dump(MemoryRegion *mr);
546
547/**
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +0200548 * memory_region_is_romd: check whether a memory region is in ROMD mode
Blue Swirlfd062572012-04-09 17:38:52 +0000549 *
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +0200550 * Returns %true if a memory region is a ROM device and currently set to allow
Blue Swirlfd062572012-04-09 17:38:52 +0000551 * direct reads.
552 *
553 * @mr: the memory region being queried
554 */
555static inline bool memory_region_is_romd(MemoryRegion *mr)
556{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +0200557 return mr->rom_device && mr->romd_mode;
Blue Swirlfd062572012-04-09 17:38:52 +0000558}
559
560/**
Avi Kivity30951152012-10-30 13:47:46 +0200561 * memory_region_is_iommu: check whether a memory region is an iommu
562 *
563 * Returns %true is a memory region is an iommu.
564 *
565 * @mr: the memory region being queried
566 */
Paolo Bonzini1619d1f2015-12-09 17:47:39 +0100567static inline bool memory_region_is_iommu(MemoryRegion *mr)
568{
569 return mr->iommu_ops;
570}
571
Avi Kivity30951152012-10-30 13:47:46 +0200572
573/**
David Gibson06866572013-05-14 19:13:56 +1000574 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
575 *
576 * @mr: the memory region that was changed
577 * @entry: the new entry in the IOMMU translation table. The entry
578 * replaces all old entries for the same virtual I/O address range.
579 * Deleted entries have .@perm == 0.
580 */
581void memory_region_notify_iommu(MemoryRegion *mr,
582 IOMMUTLBEntry entry);
583
584/**
585 * memory_region_register_iommu_notifier: register a notifier for changes to
586 * IOMMU translation entries.
587 *
588 * @mr: the memory region to observe
589 * @n: the notifier to be added; the notifier receives a pointer to an
590 * #IOMMUTLBEntry as the opaque value; the pointer ceases to be
591 * valid on exit from the notifier.
592 */
593void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
594
595/**
David Gibsona788f222015-09-30 12:13:55 +1000596 * memory_region_iommu_replay: replay existing IOMMU translations to
597 * a notifier
598 *
599 * @mr: the memory region to observe
600 * @n: the notifier to which to replay iommu mappings
601 * @granularity: Minimum page granularity to replay notifications for
602 * @is_write: Whether to treat the replay as a translate "write"
603 * through the iommu
604 */
605void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
606 hwaddr granularity, bool is_write);
607
608/**
David Gibson06866572013-05-14 19:13:56 +1000609 * memory_region_unregister_iommu_notifier: unregister a notifier for
610 * changes to IOMMU translation entries.
611 *
612 * @n: the notifier to be removed.
613 */
614void memory_region_unregister_iommu_notifier(Notifier *n);
615
616/**
Avi Kivity8991c792011-12-20 15:53:11 +0200617 * memory_region_name: get a memory region's name
618 *
619 * Returns the string that was used to initialize the memory region.
620 *
621 * @mr: the memory region being queried
622 */
Peter Crosthwaite5d546d42014-08-14 23:55:03 -0700623const char *memory_region_name(const MemoryRegion *mr);
Avi Kivity8991c792011-12-20 15:53:11 +0200624
625/**
Avi Kivity55043ba2011-12-15 17:20:34 +0200626 * memory_region_is_logging: return whether a memory region is logging writes
627 *
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +0100628 * Returns %true if the memory region is logging writes for the given client
629 *
630 * @mr: the memory region being queried
631 * @client: the client being queried
632 */
633bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
634
635/**
636 * memory_region_get_dirty_log_mask: return the clients for which a
637 * memory region is logging writes.
638 *
Paolo Bonzini677e7802015-03-23 10:53:21 +0100639 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
640 * are the bit indices.
Avi Kivity55043ba2011-12-15 17:20:34 +0200641 *
642 * @mr: the memory region being queried
643 */
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +0100644uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
Avi Kivity55043ba2011-12-15 17:20:34 +0200645
646/**
Avi Kivityce7923d2011-12-08 16:05:11 +0200647 * memory_region_is_rom: check whether a memory region is ROM
648 *
649 * Returns %true is a memory region is read-only memory.
650 *
651 * @mr: the memory region being queried
652 */
Paolo Bonzini1619d1f2015-12-09 17:47:39 +0100653static inline bool memory_region_is_rom(MemoryRegion *mr)
654{
655 return mr->ram && mr->readonly;
656}
657
Avi Kivityce7923d2011-12-08 16:05:11 +0200658
659/**
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +0800660 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
661 *
662 * Returns a file descriptor backing a file-based RAM memory region,
663 * or -1 if the region is not a file-based RAM memory region.
664 *
665 * @mr: the RAM or alias memory region being queried.
666 */
667int memory_region_get_fd(MemoryRegion *mr);
668
669/**
Paolo Bonzini4ff87572016-03-25 12:30:16 +0100670 * memory_region_set_fd: Mark a RAM memory region as backed by a
671 * file descriptor.
672 *
673 * This function is typically used after memory_region_init_ram_ptr().
674 *
675 * @mr: the memory region being queried.
676 * @fd: the file descriptor that backs @mr.
677 */
678void memory_region_set_fd(MemoryRegion *mr, int fd);
679
680/**
Avi Kivity093bc2c2011-07-26 14:26:01 +0300681 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
682 *
683 * Returns a host pointer to a RAM memory region (created with
Paolo Bonzini49b24af2015-12-16 10:30:47 +0100684 * memory_region_init_ram() or memory_region_init_ram_ptr()).
685 *
686 * Use with care; by the time this function returns, the returned pointer is
687 * not protected by RCU anymore. If the caller is not within an RCU critical
688 * section and does not hold the iothread lock, it must have other means of
689 * protecting the pointer, such as a reference to the region that includes
690 * the incoming ram_addr_t.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300691 *
692 * @mr: the memory region being queried.
693 */
694void *memory_region_get_ram_ptr(MemoryRegion *mr);
695
Paolo Bonzini37d7c082015-03-23 10:21:46 +0100696/* memory_region_ram_resize: Resize a RAM region.
697 *
698 * Only legal before guest might have detected the memory size: e.g. on
699 * incoming migration, or right after reset.
700 *
701 * @mr: a memory region created with @memory_region_init_resizeable_ram.
702 * @newsize: the new size of the region
703 * @errp: pointer to Error*, to store an error if it happens.
704 */
705void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
706 Error **errp);
707
Avi Kivity093bc2c2011-07-26 14:26:01 +0300708/**
Avi Kivity093bc2c2011-07-26 14:26:01 +0300709 * memory_region_set_log: Turn dirty logging on or off for a region.
710 *
711 * Turns dirty logging on or off for a specified client (display, migration).
712 * Only meaningful for RAM regions.
713 *
714 * @mr: the memory region being updated.
715 * @log: whether dirty logging is to be enabled or disabled.
Paolo Bonzinidbddac62015-03-23 10:31:53 +0100716 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300717 */
718void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
719
720/**
Blue Swirlcd7a45c2012-01-22 16:38:21 +0000721 * memory_region_get_dirty: Check whether a range of bytes is dirty
722 * for a specified client.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300723 *
Blue Swirlcd7a45c2012-01-22 16:38:21 +0000724 * Checks whether a range of bytes has been written to since the last
Avi Kivity093bc2c2011-07-26 14:26:01 +0300725 * call to memory_region_reset_dirty() with the same @client. Dirty logging
726 * must be enabled.
727 *
728 * @mr: the memory region being queried.
729 * @addr: the address (relative to the start of the region) being queried.
Blue Swirlcd7a45c2012-01-22 16:38:21 +0000730 * @size: the size of the range being queried.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300731 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
732 * %DIRTY_MEMORY_VGA.
733 */
Avi Kivitya8170e52012-10-23 12:30:10 +0200734bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
735 hwaddr size, unsigned client);
Avi Kivity093bc2c2011-07-26 14:26:01 +0300736
737/**
Blue Swirlfd4aa972011-10-16 16:04:59 +0000738 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300739 *
Blue Swirlfd4aa972011-10-16 16:04:59 +0000740 * Marks a range of bytes as dirty, after it has been dirtied outside
741 * guest code.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300742 *
Blue Swirlfd4aa972011-10-16 16:04:59 +0000743 * @mr: the memory region being dirtied.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300744 * @addr: the address (relative to the start of the region) being dirtied.
Blue Swirlfd4aa972011-10-16 16:04:59 +0000745 * @size: size of the range being dirtied.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300746 */
Avi Kivitya8170e52012-10-23 12:30:10 +0200747void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
748 hwaddr size);
Avi Kivity093bc2c2011-07-26 14:26:01 +0300749
750/**
Juan Quintela6c279db2012-10-17 20:24:28 +0200751 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
752 * for a specified client. It clears them.
753 *
754 * Checks whether a range of bytes has been written to since the last
755 * call to memory_region_reset_dirty() with the same @client. Dirty logging
756 * must be enabled.
757 *
758 * @mr: the memory region being queried.
759 * @addr: the address (relative to the start of the region) being queried.
760 * @size: the size of the range being queried.
761 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
762 * %DIRTY_MEMORY_VGA.
763 */
764bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
765 hwaddr size, unsigned client);
766/**
Avi Kivity093bc2c2011-07-26 14:26:01 +0300767 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
768 * any external TLBs (e.g. kvm)
769 *
770 * Flushes dirty information from accelerators such as kvm and vhost-net
771 * and makes it available to users of the memory API.
772 *
773 * @mr: the region being flushed.
774 */
775void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
776
777/**
778 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
779 * client.
780 *
781 * Marks a range of pages as no longer dirty.
782 *
783 * @mr: the region being updated.
784 * @addr: the start of the subrange being cleaned.
785 * @size: the size of the subrange being cleaned.
786 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
787 * %DIRTY_MEMORY_VGA.
788 */
Avi Kivitya8170e52012-10-23 12:30:10 +0200789void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
790 hwaddr size, unsigned client);
Avi Kivity093bc2c2011-07-26 14:26:01 +0300791
792/**
793 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
794 *
795 * Allows a memory region to be marked as read-only (turning it into a ROM).
796 * Only useful on RAM regions.
797 *
798 * @mr: the region being updated.
799 * @readonly: whether the region is to be ROM or RAM.
800 */
801void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
802
803/**
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +0200804 * memory_region_rom_device_set_romd: enable/disable ROMD mode
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300805 *
806 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +0200807 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
808 * device is mapped to guest memory and satisfies read access directly.
809 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
810 * Writes are always handled by the #MemoryRegion.write function.
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300811 *
812 * @mr: the memory region to be updated
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +0200813 * @romd_mode: %true to put the region into ROMD mode
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300814 */
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +0200815void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +0300816
817/**
Avi Kivity093bc2c2011-07-26 14:26:01 +0300818 * memory_region_set_coalescing: Enable memory coalescing for the region.
819 *
820 * Enables writes to a region to be queued for later processing. MMIO ->write
821 * callbacks may be delayed until a non-coalesced MMIO is issued.
822 * Only useful for IO regions. Roughly similar to write-combining hardware.
823 *
824 * @mr: the memory region to be write coalesced
825 */
826void memory_region_set_coalescing(MemoryRegion *mr);
827
828/**
829 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
830 * a region.
831 *
832 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
833 * Multiple calls can be issued coalescing disjoint ranges.
834 *
835 * @mr: the memory region to be updated.
836 * @offset: the start of the range within the region to be coalesced.
837 * @size: the size of the subrange to be coalesced.
838 */
839void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +0200840 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300841 uint64_t size);
842
843/**
844 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
845 *
846 * Disables any coalescing caused by memory_region_set_coalescing() or
847 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
848 * hardware.
849 *
850 * @mr: the memory region to be updated.
851 */
852void memory_region_clear_coalescing(MemoryRegion *mr);
853
854/**
Jan Kiszkad4105152012-08-23 13:02:29 +0200855 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
856 * accesses.
857 *
858 * Ensure that pending coalesced MMIO request are flushed before the memory
859 * region is accessed. This property is automatically enabled for all regions
860 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
861 *
862 * @mr: the memory region to be updated.
863 */
864void memory_region_set_flush_coalesced(MemoryRegion *mr);
865
866/**
867 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
868 * accesses.
869 *
870 * Clear the automatic coalesced MMIO flushing enabled via
871 * memory_region_set_flush_coalesced. Note that this service has no effect on
872 * memory regions that have MMIO coalescing enabled for themselves. For them,
873 * automatic flushing will stop once coalescing is disabled.
874 *
875 * @mr: the memory region to be updated.
876 */
877void memory_region_clear_flush_coalesced(MemoryRegion *mr);
878
879/**
Jan Kiszka196ea132015-06-18 18:47:20 +0200880 * memory_region_set_global_locking: Declares the access processing requires
881 * QEMU's global lock.
882 *
883 * When this is invoked, accesses to the memory region will be processed while
884 * holding the global lock of QEMU. This is the default behavior of memory
885 * regions.
886 *
887 * @mr: the memory region to be updated.
888 */
889void memory_region_set_global_locking(MemoryRegion *mr);
890
891/**
892 * memory_region_clear_global_locking: Declares that access processing does
893 * not depend on the QEMU global lock.
894 *
895 * By clearing this property, accesses to the memory region will be processed
896 * outside of QEMU's global lock (unless the lock is held when issuing the
897 * access request). In this case, the device model implementing the access
898 * handlers is responsible for synchronization of concurrency.
899 *
900 * @mr: the memory region to be updated.
901 */
902void memory_region_clear_global_locking(MemoryRegion *mr);
903
904/**
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300905 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
906 * is written to a location.
907 *
908 * Marks a word in an IO region (initialized with memory_region_init_io())
909 * as a trigger for an eventfd event. The I/O callback will not be called.
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300910 * The caller must be prepared to handle failure (that is, take the required
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300911 * action if the callback _is_ called).
912 *
913 * @mr: the memory region being updated.
914 * @addr: the address within @mr that is to be monitored
915 * @size: the size of the access to trigger the eventfd
916 * @match_data: whether to match against @data, instead of just @addr
917 * @data: the data to match against the guest write
918 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
919 **/
920void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +0200921 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300922 unsigned size,
923 bool match_data,
924 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +0200925 EventNotifier *e);
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300926
927/**
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300928 * memory_region_del_eventfd: Cancel an eventfd.
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300929 *
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300930 * Cancels an eventfd trigger requested by a previous
931 * memory_region_add_eventfd() call.
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300932 *
933 * @mr: the memory region being updated.
934 * @addr: the address within @mr that is to be monitored
935 * @size: the size of the access to trigger the eventfd
936 * @match_data: whether to match against @data, instead of just @addr
937 * @data: the data to match against the guest write
938 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
939 */
940void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +0200941 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300942 unsigned size,
943 bool match_data,
944 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +0200945 EventNotifier *e);
946
Avi Kivity3e9d69e2011-07-26 14:26:11 +0300947/**
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300948 * memory_region_add_subregion: Add a subregion to a container.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300949 *
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300950 * Adds a subregion at @offset. The subregion may not overlap with other
Avi Kivity093bc2c2011-07-26 14:26:01 +0300951 * subregions (except for those explicitly marked as overlapping). A region
952 * may only be added once as a subregion (unless removed with
953 * memory_region_del_subregion()); use memory_region_init_alias() if you
954 * want a region to be a subregion in multiple locations.
955 *
956 * @mr: the region to contain the new subregion; must be a container
957 * initialized with memory_region_init().
958 * @offset: the offset relative to @mr where @subregion is added.
959 * @subregion: the subregion to be added.
960 */
961void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +0200962 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300963 MemoryRegion *subregion);
964/**
BALATON Zoltan1a7e8ca2012-08-22 17:18:38 +0200965 * memory_region_add_subregion_overlap: Add a subregion to a container
966 * with overlap.
Avi Kivity093bc2c2011-07-26 14:26:01 +0300967 *
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -0300968 * Adds a subregion at @offset. The subregion may overlap with other
Avi Kivity093bc2c2011-07-26 14:26:01 +0300969 * subregions. Conflicts are resolved by having a higher @priority hide a
970 * lower @priority. Subregions without priority are taken as @priority 0.
971 * A region may only be added once as a subregion (unless removed with
972 * memory_region_del_subregion()); use memory_region_init_alias() if you
973 * want a region to be a subregion in multiple locations.
974 *
975 * @mr: the region to contain the new subregion; must be a container
976 * initialized with memory_region_init().
977 * @offset: the offset relative to @mr where @subregion is added.
978 * @subregion: the subregion to be added.
979 * @priority: used for resolving overlaps; highest priority wins.
980 */
981void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +0200982 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +0300983 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +0300984 int priority);
Avi Kivitye34911c2011-12-19 12:06:23 +0200985
986/**
987 * memory_region_get_ram_addr: Get the ram address associated with a memory
988 * region
Avi Kivitye34911c2011-12-19 12:06:23 +0200989 */
Fam Zheng7ebb2742016-03-01 14:18:20 +0800990ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
Avi Kivitye34911c2011-12-19 12:06:23 +0200991
Igor Mammedova2b257d2014-10-31 16:38:37 +0000992uint64_t memory_region_get_alignment(const MemoryRegion *mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +0300993/**
994 * memory_region_del_subregion: Remove a subregion.
995 *
996 * Removes a subregion from its container.
997 *
998 * @mr: the container to be updated.
999 * @subregion: the region being removed; must be a current subregion of @mr.
1000 */
1001void memory_region_del_subregion(MemoryRegion *mr,
1002 MemoryRegion *subregion);
1003
Avi Kivity6bba19b2011-09-14 11:54:58 +03001004/*
1005 * memory_region_set_enabled: dynamically enable or disable a region
1006 *
1007 * Enables or disables a memory region. A disabled memory region
1008 * ignores all accesses to itself and its subregions. It does not
1009 * obscure sibling subregions with lower priority - it simply behaves as
1010 * if it was removed from the hierarchy.
1011 *
1012 * Regions default to being enabled.
1013 *
1014 * @mr: the region to be updated
1015 * @enabled: whether to enable or disable the region
1016 */
1017void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1018
Avi Kivity2282e1a2011-09-14 12:10:12 +03001019/*
1020 * memory_region_set_address: dynamically update the address of a region
1021 *
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001022 * Dynamically updates the address of a region, relative to its container.
Avi Kivity2282e1a2011-09-14 12:10:12 +03001023 * May be used on regions that are currently part of a memory hierarchy.
1024 *
1025 * @mr: the region to be updated
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001026 * @addr: new address, relative to container region
Avi Kivity2282e1a2011-09-14 12:10:12 +03001027 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001028void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
Avi Kivity2282e1a2011-09-14 12:10:12 +03001029
Avi Kivity47033592011-12-04 19:16:50 +02001030/*
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02001031 * memory_region_set_size: dynamically update the size of a region.
1032 *
1033 * Dynamically updates the size of a region.
1034 *
1035 * @mr: the region to be updated
1036 * @size: used size of the region.
1037 */
1038void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1039
1040/*
Avi Kivity47033592011-12-04 19:16:50 +02001041 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1042 *
1043 * Dynamically updates the offset into the target region that an alias points
1044 * to, as if the fourth argument to memory_region_init_alias() has changed.
1045 *
1046 * @mr: the #MemoryRegion to be updated; should be an alias.
1047 * @offset: the new offset into the target memory region
1048 */
1049void memory_region_set_alias_offset(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001050 hwaddr offset);
Avi Kivity47033592011-12-04 19:16:50 +02001051
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -03001052/**
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001053 * memory_region_present: checks if an address relative to a @container
1054 * translates into #MemoryRegion within @container
Paolo Bonzini3ce10902013-07-02 13:40:48 +02001055 *
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001056 * Answer whether a #MemoryRegion within @container covers the address
Paolo Bonzini3ce10902013-07-02 13:40:48 +02001057 * @addr.
1058 *
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001059 * @container: a #MemoryRegion within which @addr is a relative address
1060 * @addr: the area within @container to be searched
Paolo Bonzini3ce10902013-07-02 13:40:48 +02001061 */
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001062bool memory_region_present(MemoryRegion *container, hwaddr addr);
Paolo Bonzini3ce10902013-07-02 13:40:48 +02001063
1064/**
Igor Mammedoveed2bac2014-06-02 15:25:06 +02001065 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1066 * into any address space.
1067 *
1068 * @mr: a #MemoryRegion which should be checked if it's mapped
1069 */
1070bool memory_region_is_mapped(MemoryRegion *mr);
1071
1072/**
Paolo Bonzini73034e92013-05-07 15:48:28 +02001073 * memory_region_find: translate an address/size relative to a
1074 * MemoryRegion into a #MemoryRegionSection.
Avi Kivitye2177952011-12-08 15:00:18 +02001075 *
Paolo Bonzini73034e92013-05-07 15:48:28 +02001076 * Locates the first #MemoryRegion within @mr that overlaps the range
1077 * given by @addr and @size.
Avi Kivitye2177952011-12-08 15:00:18 +02001078 *
1079 * Returns a #MemoryRegionSection that describes a contiguous overlap.
1080 * It will have the following characteristics:
Avi Kivitye2177952011-12-08 15:00:18 +02001081 * .@size = 0 iff no overlap was found
1082 * .@mr is non-%NULL iff an overlap was found
1083 *
Paolo Bonzini73034e92013-05-07 15:48:28 +02001084 * Remember that in the return value the @offset_within_region is
1085 * relative to the returned region (in the .@mr field), not to the
1086 * @mr argument.
1087 *
1088 * Similarly, the .@offset_within_address_space is relative to the
1089 * address space that contains both regions, the passed and the
1090 * returned one. However, in the special case where the @mr argument
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02001091 * has no container (and thus is the root of the address space), the
Paolo Bonzini73034e92013-05-07 15:48:28 +02001092 * following will hold:
1093 * .@offset_within_address_space >= @addr
1094 * .@offset_within_address_space + .@size <= @addr + @size
1095 *
1096 * @mr: a MemoryRegion within which @addr is a relative address
1097 * @addr: start of the area within @as to be searched
Avi Kivitye2177952011-12-08 15:00:18 +02001098 * @size: size of the area to be searched
1099 */
Paolo Bonzini73034e92013-05-07 15:48:28 +02001100MemoryRegionSection memory_region_find(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001101 hwaddr addr, uint64_t size);
Avi Kivitye2177952011-12-08 15:00:18 +02001102
Blue Swirlfd062572012-04-09 17:38:52 +00001103/**
Paolo Bonzini1d671362013-04-24 10:46:55 +02001104 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
Avi Kivity86e775c2011-12-15 16:24:49 +02001105 *
1106 * Synchronizes the dirty page log for an entire address space.
Paolo Bonzini1d671362013-04-24 10:46:55 +02001107 * @as: the address space that contains the memory being synchronized
Avi Kivity86e775c2011-12-15 16:24:49 +02001108 */
Paolo Bonzini1d671362013-04-24 10:46:55 +02001109void address_space_sync_dirty_bitmap(AddressSpace *as);
Avi Kivity86e775c2011-12-15 16:24:49 +02001110
Avi Kivitye2177952011-12-08 15:00:18 +02001111/**
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -03001112 * memory_region_transaction_begin: Start a transaction.
1113 *
1114 * During a transaction, changes will be accumulated and made visible
Stefan Weildabdf392012-01-08 19:35:09 +01001115 * only when the transaction ends (is committed).
Avi Kivity4ef4db82011-07-26 14:26:13 +03001116 */
1117void memory_region_transaction_begin(void);
Ademar de Souza Reis Jr69ddaf62011-12-05 16:54:14 -03001118
1119/**
1120 * memory_region_transaction_commit: Commit a transaction and make changes
1121 * visible to the guest.
Avi Kivity4ef4db82011-07-26 14:26:13 +03001122 */
1123void memory_region_transaction_commit(void);
1124
Avi Kivity7664e802011-12-11 14:47:25 +02001125/**
1126 * memory_listener_register: register callbacks to be called when memory
1127 * sections are mapped or unmapped into an address
1128 * space
1129 *
1130 * @listener: an object containing the callbacks to be called
Avi Kivity7376e582012-02-08 21:05:17 +02001131 * @filter: if non-%NULL, only regions in this address space will be observed
Avi Kivity7664e802011-12-11 14:47:25 +02001132 */
Avi Kivityf6790af2012-10-02 20:13:51 +02001133void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
Avi Kivity7664e802011-12-11 14:47:25 +02001134
1135/**
1136 * memory_listener_unregister: undo the effect of memory_listener_register()
1137 *
1138 * @listener: an object containing the callbacks to be removed
1139 */
1140void memory_listener_unregister(MemoryListener *listener);
1141
1142/**
1143 * memory_global_dirty_log_start: begin dirty logging for all regions
1144 */
1145void memory_global_dirty_log_start(void);
1146
1147/**
BALATON Zoltan1a7e8ca2012-08-22 17:18:38 +02001148 * memory_global_dirty_log_stop: end dirty logging for all regions
Avi Kivity7664e802011-12-11 14:47:25 +02001149 */
1150void memory_global_dirty_log_stop(void);
1151
Blue Swirl314e2982011-09-11 20:22:05 +00001152void mtree_info(fprintf_function mon_printf, void *f);
1153
Avi Kivity9ad2bbc2012-10-02 14:59:23 +02001154/**
Peter Maydell3b643492015-04-26 16:49:23 +01001155 * memory_region_dispatch_read: perform a read directly to the specified
1156 * MemoryRegion.
1157 *
1158 * @mr: #MemoryRegion to access
1159 * @addr: address within that region
1160 * @pval: pointer to uint64_t which the data is written to
1161 * @size: size of the access in bytes
1162 * @attrs: memory transaction attributes to use for the access
1163 */
1164MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1165 hwaddr addr,
1166 uint64_t *pval,
1167 unsigned size,
1168 MemTxAttrs attrs);
1169/**
1170 * memory_region_dispatch_write: perform a write directly to the specified
1171 * MemoryRegion.
1172 *
1173 * @mr: #MemoryRegion to access
1174 * @addr: address within that region
1175 * @data: data to write
1176 * @size: size of the access in bytes
1177 * @attrs: memory transaction attributes to use for the access
1178 */
1179MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1180 hwaddr addr,
1181 uint64_t data,
1182 unsigned size,
1183 MemTxAttrs attrs);
1184
1185/**
Avi Kivity9ad2bbc2012-10-02 14:59:23 +02001186 * address_space_init: initializes an address space
1187 *
1188 * @as: an uninitialized #AddressSpace
Veres Lajos67cc32e2015-09-08 22:45:14 +01001189 * @root: a #MemoryRegion that routes addresses for the address space
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001190 * @name: an address space name. The name is only used for debugging
1191 * output.
Avi Kivity9ad2bbc2012-10-02 14:59:23 +02001192 */
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001193void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
Avi Kivity9ad2bbc2012-10-02 14:59:23 +02001194
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00001195/**
1196 * address_space_init_shareable: return an address space for a memory region,
1197 * creating it if it does not already exist
1198 *
1199 * @root: a #MemoryRegion that routes addresses for the address space
1200 * @name: an address space name. The name is only used for debugging
1201 * output.
1202 *
1203 * This function will return a pointer to an existing AddressSpace
1204 * which was initialized with the specified MemoryRegion, or it will
1205 * create and initialize one if it does not already exist. The ASes
1206 * are reference-counted, so the memory will be freed automatically
1207 * when the AddressSpace is destroyed via address_space_destroy.
1208 */
1209AddressSpace *address_space_init_shareable(MemoryRegion *root,
1210 const char *name);
Avi Kivity83f3c252012-10-07 12:59:55 +02001211
1212/**
1213 * address_space_destroy: destroy an address space
1214 *
1215 * Releases all resources associated with an address space. After an address space
1216 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1217 * as well.
1218 *
1219 * @as: address space to be destroyed
1220 */
1221void address_space_destroy(AddressSpace *as);
1222
Avi Kivityac1970f2012-10-03 16:22:53 +02001223/**
1224 * address_space_rw: read from or write to an address space.
1225 *
Peter Maydell5c9eb022015-04-26 16:49:24 +01001226 * Return a MemTxResult indicating whether the operation succeeded
1227 * or failed (eg unassigned memory, device rejected the transaction,
1228 * IOMMU fault).
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001229 *
Avi Kivityac1970f2012-10-03 16:22:53 +02001230 * @as: #AddressSpace to be accessed
1231 * @addr: address within that address space
Peter Maydell5c9eb022015-04-26 16:49:24 +01001232 * @attrs: memory transaction attributes
Avi Kivityac1970f2012-10-03 16:22:53 +02001233 * @buf: buffer with the data transferred
1234 * @is_write: indicates the transfer direction
1235 */
Peter Maydell5c9eb022015-04-26 16:49:24 +01001236MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
1237 MemTxAttrs attrs, uint8_t *buf,
1238 int len, bool is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02001239
1240/**
1241 * address_space_write: write to address space.
1242 *
Peter Maydell5c9eb022015-04-26 16:49:24 +01001243 * Return a MemTxResult indicating whether the operation succeeded
1244 * or failed (eg unassigned memory, device rejected the transaction,
1245 * IOMMU fault).
Avi Kivityac1970f2012-10-03 16:22:53 +02001246 *
1247 * @as: #AddressSpace to be accessed
1248 * @addr: address within that address space
Peter Maydell5c9eb022015-04-26 16:49:24 +01001249 * @attrs: memory transaction attributes
Avi Kivityac1970f2012-10-03 16:22:53 +02001250 * @buf: buffer with the data transferred
1251 */
Peter Maydell5c9eb022015-04-26 16:49:24 +01001252MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1253 MemTxAttrs attrs,
1254 const uint8_t *buf, int len);
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001255
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01001256/* address_space_ld*: load from an address space
Peter Maydell50013112015-04-26 16:49:24 +01001257 * address_space_st*: store to an address space
1258 *
1259 * These functions perform a load or store of the byte, word,
1260 * longword or quad to the specified address within the AddressSpace.
1261 * The _le suffixed functions treat the data as little endian;
1262 * _be indicates big endian; no suffix indicates "same endianness
1263 * as guest CPU".
1264 *
1265 * The "guest CPU endianness" accessors are deprecated for use outside
1266 * target-* code; devices should be CPU-agnostic and use either the LE
1267 * or the BE accessors.
1268 *
1269 * @as #AddressSpace to be accessed
1270 * @addr: address within that address space
1271 * @val: data value, for stores
1272 * @attrs: memory transaction attributes
1273 * @result: location to write the success/failure of the transaction;
1274 * if NULL, this information is discarded
1275 */
1276uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
1277 MemTxAttrs attrs, MemTxResult *result);
1278uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
1279 MemTxAttrs attrs, MemTxResult *result);
1280uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
1281 MemTxAttrs attrs, MemTxResult *result);
1282uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
1283 MemTxAttrs attrs, MemTxResult *result);
1284uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
1285 MemTxAttrs attrs, MemTxResult *result);
1286uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
1287 MemTxAttrs attrs, MemTxResult *result);
1288uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
1289 MemTxAttrs attrs, MemTxResult *result);
1290void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
1291 MemTxAttrs attrs, MemTxResult *result);
1292void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
1293 MemTxAttrs attrs, MemTxResult *result);
1294void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
1295 MemTxAttrs attrs, MemTxResult *result);
1296void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
1297 MemTxAttrs attrs, MemTxResult *result);
1298void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
1299 MemTxAttrs attrs, MemTxResult *result);
1300void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
1301 MemTxAttrs attrs, MemTxResult *result);
1302void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
1303 MemTxAttrs attrs, MemTxResult *result);
1304
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001305/* address_space_translate: translate an address range into an address space
Paolo Bonzini41063e12015-03-18 14:21:43 +01001306 * into a MemoryRegion and an address range into that section. Should be
1307 * called from an RCU critical section, to avoid that the last reference
1308 * to the returned region disappears after address_space_translate returns.
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001309 *
1310 * @as: #AddressSpace to be accessed
1311 * @addr: address within that address space
1312 * @xlat: pointer to address within the returned memory region section's
1313 * #MemoryRegion.
1314 * @len: pointer to length
1315 * @is_write: indicates the transfer direction
1316 */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001317MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
1318 hwaddr *xlat, hwaddr *len,
1319 bool is_write);
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001320
/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size. This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 *
 * Returns: true if the whole range can be accessed, false otherwise.
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
1337
/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * The returned pointer must be released with address_space_unmap() once the
 * access is complete.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 *
 * Returns: host pointer to the mapped memory, or %NULL on failure.
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02001353
/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);
Avi Kivityac1970f2012-10-03 16:22:53 +02001367
1368
/* Internal functions, part of the implementation of address_space_read. */

/* Slow-path continuation used by address_space_read() once translation has
 * already been done for the first chunk (@addr1/@l within @mr). */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr);
/* Out-of-line read path, used when @len is not a compile-time constant. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len);
/* Return the host pointer for @addr within @ram_block. */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01001377
1378static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1379{
1380 if (is_write) {
1381 return memory_region_is_ram(mr) && !mr->readonly;
1382 } else {
1383 return memory_region_is_ram(mr) || memory_region_is_romd(mr);
1384 }
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01001385}
1386
/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: number of bytes to read
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;

    /* Fast path: when @len is a compile-time constant, inline the
     * translate-and-memcpy sequence at the call site; a zero-length
     * constant read collapses to a no-op returning MEMTX_OK. */
    if (__builtin_constant_p(len)) {
        if (len) {
            /* Hold the RCU read lock so the translated region cannot
             * disappear while we access it (see address_space_translate). */
            rcu_read_lock();
            l = len;
            mr = address_space_translate(as, addr, &addr1, &l, false);
            if (len == l && memory_access_is_direct(mr, false)) {
                /* Entire range fits in directly-accessible RAM/ROMD:
                 * copy straight from the host pointer. */
                addr1 += memory_region_get_ram_addr(mr);
                ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                /* Translation shortened the range or the region needs
                 * MMIO dispatch: continue on the out-of-line slow path,
                 * reusing the translation already done for (@addr1, @l). */
                result = address_space_read_continue(as, addr, attrs, buf, len,
                                                     addr1, l, mr);
            }
            rcu_read_unlock();
        }
    } else {
        /* Non-constant length: no benefit from inlining, call out. */
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}
Paolo Bonzinia203ac72015-12-09 10:18:57 +01001428
Avi Kivity093bc2c2011-07-26 14:26:01 +03001429#endif
1430
1431#endif