exec: remove ram_addr argument from qemu_ram_block_from_host
[qemu.git] / memory.c
/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

//#define DEBUG_UNASSIGNED

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);
typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
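
/* Illustrative sketch (not part of the original file): the helpers above
 * implement half-open interval arithmetic on 128-bit ranges.  For example,
 * with r1 = [0x1000, 0x2000) and r2 = [0x1800, 0x2800):
 *
 *     AddrRange r1 = addrrange_make(int128_make64(0x1000),
 *                                   int128_make64(0x1000));
 *     AddrRange r2 = addrrange_make(int128_make64(0x1800),
 *                                   int128_make64(0x1000));
 *     assert(addrrange_intersects(r1, r2));
 *     AddrRange i = addrrange_intersection(r1, r2);
 *     // i.start == 0x1800, i.size == 0x800, i.e. [0x1800, 0x2000)
 */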
enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = (fr)->addr.size,                                        \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
    }), ##_args)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
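
/* Worked example (illustrative, not part of the original file): can_merge()
 * lets flatview_simplify() collapse ranges that are contiguous both in
 * guest physical address space and within the underlying MemoryRegion.
 * E.g. two ranges [0x0, 0x1000) and [0x1000, 0x2000) backed by the same mr
 * at region offsets 0x0 and 0x1000 become one [0x0, 0x2000) range; if the
 * second range instead started at region offset 0x3000, the Int128 offset
 * check in can_merge() would fail and both ranges would be kept.
 */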
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access)(MemoryRegion *mr,
                                                                   hwaddr addr,
                                                                   uint64_t *value,
                                                                   unsigned size,
                                                                   unsigned shift,
                                                                   uint64_t mask,
                                                                   MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
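
/* Illustrative sketch (not part of the original file): for a device whose
 * ops declare impl.max_access_size = 4, an 8-byte read of a big-endian
 * region at address A is decomposed into two 4-byte callbacks, with the
 * shift placing the first word in the high half of *value:
 *
 *     access(mr, A,     value, 4, 32, 0xffffffffULL, attrs);
 *     access(mr, A + 4, value, 4,  0, 0xffffffffULL, attrs);
 *
 * On a little-endian region the shifts would be 0 and 32 instead.
 */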
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
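
/* Example (illustrative, not part of the original file): given a container
 * with two overlapping subregions
 *
 *     0x0000-0x0fff  container
 *     0x0000-0x07ff  subregion A, priority 1
 *     0x0400-0x0bff  subregion B, priority 0
 *
 * A is visited first (higher-priority siblings sort earlier in the
 * subregions list), and ranges already in the view obscure later ones, so
 * the resulting FlatView holds A at [0x0000, 0x0800) and only the part of
 * B not covered by A at [0x0800, 0x0c00).
 */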
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
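
/* Illustrative note (not part of the original file): the adding flag
 * mirrors how the caller drives this function twice per topology update:
 *
 *     address_space_update_topology_pass(as, old, new, false);  // dels
 *     address_space_update_topology_pass(as, old, new, true);   // adds
 *
 * so listeners see every region_del for vanished ranges before any
 * region_add for new ones, while unchanged ranges get region_nop plus any
 * log_start/log_stop implied by a changed dirty_log_mask.
 */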
static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}
static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
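
/* Example (illustrative, not part of the original file): QOM path
 * components cannot contain '/', '[', ']' or '\', so a region named
 * "pci/mem[0]" becomes "pci\x2fmem\x5b0\x5d": each reserved byte expands
 * to a four-character \xNN escape, hence the "4 : 1" size computation
 * above.
 */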
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
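
/* Usage sketch (illustrative, not part of the original file; mydev_ops and
 * s are hypothetical names): a device typically creates an MMIO region
 * with its own MemoryRegionOps and maps it into a parent container:
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(sysmem, 0x10000000, &s->iomem);
 *
 * where mydev_ops supplies the .read/.write callbacks and s is the device
 * state handed back as the opaque pointer.
 */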
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}
void memory_region_set_skip_dump(MemoryRegion *mr)
{
    mr->skip_dump = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops,
    mr->terminates = true;  /* then re-forwards */
    notifier_list_init(&mr->iommu_notify);
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}
bool memory_region_is_skip_dump(MemoryRegion *mr)
{
    return mr->skip_dump;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    notifier_list_add(&mr->iommu_notify, n);
}

void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
                                hwaddr granularity, bool is_write)
{
    hwaddr addr;
    IOMMUTLBEntry iotlb;

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(Notifier *n)
{
    notifier_remove(n);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    assert(memory_region_is_iommu(mr));
    notifier_list_notify(&mr->iommu_notify, &entry);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void memory_region_set_fd(MemoryRegion *mr, int fd)
{
    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    mr->ram_block->fd = fd;
    rcu_read_unlock();
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_get_ram_ptr(mr->ram_block, memory_region_get_ram_addr(mr));
    rcu_read_unlock();

    return ptr + offset;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
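
/* Usage sketch (illustrative, not part of the original file): a virtio
 * transport, for example, can arm a queue doorbell with
 *
 *     memory_region_add_eventfd(mr, offset, 2, true, vq_index, &notifier);
 *
 * so a 2-byte guest write of vq_index at offset signals the EventNotifier
 * directly, via KVM's ioeventfd when available or otherwise via the
 * memory_region_dispatch_write_eventfds() fallback above.
 */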
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
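
/* Illustrative note (not part of the original file): plain
 * memory_region_add_subregion() maps at priority 0 and assumes callers
 * avoid overlaps; the _overlap variant makes stacking explicit, e.g.
 *
 *     memory_region_add_subregion_overlap(sysmem, 0xa0000, vga_mem, 1);
 *
 * lets a VGA window shadow RAM in the same range, because the flattening
 * code renders higher-priority siblings first.
 */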
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
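
/* Usage sketch (illustrative, not part of the original file): callers
 * resolve a guest physical range to its terminating region like so:
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *     if (sec.mr) {
 *         // use sec.offset_within_region, sec.size, ...
 *         memory_region_unref(sec.mr);  // drop the reference taken above
 *     }
 *
 * The _rcu variant skips the ref/unref but must stay inside an RCU
 * critical section.
 */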
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void address_space_sync_dirty_bitmap(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
    flatview_unref(view);
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as) {
        return;
    }

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
{
    MemoryListener *other = NULL;
    AddressSpace *as;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        listener_add_address_space(listener, as);
    }
}
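
/* Illustrative note (not part of the original file): listeners register
 * with a priority and an optional AddressSpace filter, e.g.
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,   // hypothetical callbacks
 *         .region_del = my_region_del,
 *         .priority = 10,
 *     };
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * The sorted insertion above keeps lower-priority listeners first, so
 * Forward calls reach them before higher-priority ones and Reverse calls
 * do the opposite.
 */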
void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}

static void do_address_space_destroy(AddressSpace *as)
{
    MemoryListener *listener;
    bool do_free = as->malloced;

    address_space_destroy_dispatch(as);

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        assert(listener->address_space_filter != as);
    }

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}

AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_unregister(as);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}
void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)