/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "memory.h"
#include <assert.h>
typedef struct AddrRange AddrRange;

/* A half-open range of guest physical addresses: [start, start + size). */
struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}
static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* Use inclusive end addresses (off-by-one arithmetic) so that a range
     * reaching the very top of the address space does not overflow.
     */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}
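/* Worked example (illustrative values): intersecting [0x1000, +0x2000)
 * with [0x2000, +0x4000) gives start = MAX(0x1000, 0x2000) = 0x2000 and
 * inclusive end = MIN(0x2fff, 0x5fff) = 0x2fff, i.e. [0x2000, +0x1000).
 */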

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static FlatView current_memory_map;
static MemoryRegion *root_memory_region;

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        /* Rebase into the alias target's address space and render that
         * region instead; the alias itself contributes nothing.
         */
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->has_ram_addr) {
        /* Pure container region; nothing to render for @mr itself. */
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));

    return view;
}
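/* Illustrative result (hypothetical regions): given a 0x8000-byte RAM region
 * at 0x0 and, overlapping it at higher priority, a 0x1000-byte MMIO region
 * at 0x4000, the flattened view holds three disjoint ranges:
 *
 *   [0x0000, +0x4000) -> RAM,  offset_in_region 0x0000
 *   [0x4000, +0x1000) -> MMIO, offset_in_region 0x0000
 *   [0x5000, +0x3000) -> RAM,  offset_in_region 0x5000
 */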

static void memory_region_update_topology(void)
{
    FlatView old_view = current_memory_map;
    FlatView new_view = generate_memory_topology(root_memory_region);
    unsigned iold, inew;
    FlatRange *frold, *frnew;
    ram_addr_t phys_offset, region_offset;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            cpu_register_physical_memory(frold->addr.start, frold->addr.size,
                                         IO_MEM_UNASSIGNED);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                cpu_physical_log_stop(frnew->addr.start, frnew->addr.size);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                cpu_physical_log_start(frnew->addr.start, frnew->addr.size);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            phys_offset = frnew->mr->ram_addr;
            region_offset = frnew->offset_in_region;
            /* cpu_register_physical_memory_log() wants region_offset for
             * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
             */
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
                phys_offset += region_offset;
                region_offset = 0;
            }

            cpu_register_physical_memory_log(frnew->addr.start,
                                             frnew->addr.size,
                                             phys_offset,
                                             region_offset,
                                             frnew->dirty_log_mask);
            ++inew;
        }
    }
    current_memory_map = new_view;
    flatview_destroy(&old_view);
}
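/* Example walk (illustrative): old view {R1@0x0, R2@0x8000}, new view
 * {R1@0x0, R3@0x8000}.  R1 compares equal, so only its logging state is
 * refreshed; at 0x8000 the ranges differ, so the first branch unregisters
 * R2 and a later iteration's "in new" branch registers R3.
 */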

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->has_ram_addr = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a max_access_size of zero as "any size valid", for
     * compatibility with ops that leave .valid unset.
     */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    /* Default to 1..4 byte accesses if the device leaves .impl unset. */
    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}
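/* Illustrative split (hypothetical device): a 4-byte read from a device
 * whose impl.max_access_size is 2 becomes two 2-byte reads at addr+0 and
 * addr+2; each result is masked to 16 bits and shifted into place, so
 * data = lo | (hi << 16) (little-endian only; big-endian is a FIXME).
 */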

static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}

static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->has_ram_addr = true;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
}
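/* A minimal usage sketch (hypothetical device; names are illustrative,
 * assuming the MemoryRegionOps callback signatures declared in memory.h):
 *
 *     static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                                unsigned size)
 *     {
 *         return 0;  // decode addr and return register contents
 *     }
 *
 *     static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                             uint64_t data, unsigned size)
 *     {
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 */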

void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
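/* Illustrative alias use (hypothetical board code): expose the first
 * megabyte of RAM a second time at a high address:
 *
 *     memory_region_init_alias(&hole, "isa-hole", &ram, 0, 0x100000);
 *     memory_region_add_subregion(system, 0xe0000000, &hole);
 *
 * The alias owns no memory of its own; render_memory_region() rebases into
 * @orig at @offset and renders that region instead.
 */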

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}
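/* @client selects a single dirty-tracking bit (e.g. a VGA or migration
 * dirty-logging client; the constants are defined outside this file).
 * Toggling it re-renders the topology, which propagates the change via
 * cpu_physical_log_start()/cpu_physical_log_stop() above.
 */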

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->has_ram_addr);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->has_ram_addr);

    return qemu_get_ram_ptr(mr->ram_addr);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                /* cmr->addr is region-relative; shift it into the absolute
                 * address space of this flat range.
                 */
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}
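/* Typical use (illustrative): a NIC-style device marks its whole register
 * window coalesced so batched MMIO writes can be flushed lazily:
 *
 *     memory_region_set_coalescing(&s->mmio);
 *
 * or only a sub-range of it:
 *
 *     memory_region_add_coalescing(&s->mmio, 0x100, 0x80);
 */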

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->addr + other->size
            || offset + subregion->size <= other->addr) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->addr,
               (unsigned long long)other->size);
    }
    /* Keep the list sorted by descending priority; render_memory_region()
     * relies on this so higher-priority subregions obscure lower ones.
     */
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
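/* Sketch of building a hierarchy (hypothetical board code):
 *
 *     memory_region_add_subregion(system, 0x00000000, &ram);
 *     memory_region_add_subregion_overlap(system, 0x000a0000, &vga_window, 1);
 *
 * The VGA window wins over RAM where the two overlap because its priority
 * (1) is higher: it is rendered first, and earlier ranges in the flat view
 * obscure later ones.
 */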

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}