/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "exec/exec-all.h" /* qemu_sprint_backtrace */
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;
/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
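
/* Illustrative sketch (not part of the original file): how the AddrRange
 * helpers above compose.  Intersecting [0x1000, +0x1000) with
 * [0x1800, +0x1000) yields [0x1800, +0x800).  The example function name is
 * hypothetical.
 */
static inline AddrRange addrrange_intersection_example(void)
{
    AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x1000));
    AddrRange b = addrrange_make(int128_make64(0x1800), int128_make64(0x1000));

    assert(addrrange_intersects(a, b));
    /* start == 0x1800, size == 0x800 */
    return addrrange_intersection(a, b);
}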
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
    do {                                                               \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);     \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);        \
    } while (0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access)(MemoryRegion *mr,
                                                                   hwaddr addr,
                                                                   uint64_t *value,
                                                                   unsigned size,
                                                                   unsigned shift,
                                                                   uint64_t mask,
                                                                   MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
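
/* Worked example (illustrative, not part of the original file): for an
 * 8-byte little-endian read on a device whose implementation only handles
 * 4-byte accesses (impl.max_access_size == 4), the loop above issues two
 * accessor calls:
 *
 *   access(mr, addr + 0, value, 4, 0  * 8 , 0xffffffff, attrs);
 *   access(mr, addr + 4, value, 4, 32     , 0xffffffff, attrs);
 *
 * Each partial result is masked and shifted into its slot of *value.  On a
 * big-endian target the shifts run in the opposite order, so the access at
 * the lowest address lands in the most significant bytes.
 */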
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}
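
/* Illustrative sketch (not part of the original file): given a root
 * container holding RAM at [0x0, 0x4000) and a higher-priority MMIO
 * subregion overlapping it at [0x1000, 0x2000), generate_memory_topology()
 * produces three disjoint FlatRanges:
 *
 *   0x0000-0x0fff  ram   (offset_in_region 0x0)
 *   0x1000-0x1fff  mmio  (offset_in_region 0x0)
 *   0x2000-0x3fff  ram   (offset_in_region 0x2000)
 *
 * because render_memory_region() visits subregions in priority order and
 * each pass only fills gaps left by ranges already in the view.
 */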
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
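
/* Usage sketch (illustrative, not part of the original file; the
 * example_remap_banks name and layout are hypothetical): a device remapping
 * several regions at once can wrap the updates in a transaction so that
 * listeners observe one topology change instead of one per call.
 */
static inline void example_remap_banks(MemoryRegion *bank0,
                                       MemoryRegion *bank1,
                                       hwaddr new_base)
{
    memory_region_transaction_begin();
    memory_region_set_address(bank0, new_base);
    memory_region_set_address(bank1, new_base + 0x1000);
    memory_region_set_enabled(bank1, true);
    /* A single flatview rebuild happens here, when depth drops to zero. */
    memory_region_transaction_commit();
}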
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}
static int qemu_target_backtrace(target_ulong *array, size_t size)
{
    int n = 0;
    if (size >= 2) {
#if defined(TARGET_ARM)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->regs[15];
        array[1] = env->regs[14];
        n = 2;
#elif defined(TARGET_MIPS)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->active_tc.PC;
        array[1] = env->active_tc.gpr[31];
        n = 2;
#endif
    }
    return n;
}

#include "disas/disas.h"
const char *qemu_sprint_backtrace(char *buffer, size_t length)
{
    char *p = buffer;
    if (current_cpu) {
        target_ulong caller[2];
        const char *symbol;

        qemu_target_backtrace(caller, 2);
        symbol = lookup_symbol(caller[0]);
        p += sprintf(p, "[%s]", symbol);
        symbol = lookup_symbol(caller[1]);
        p += sprintf(p, "[%s]", symbol);
    } else {
        p += sprintf(p, "[cpu not running]");
    }
    assert((p - buffer) < length);
    return buffer;
}
#ifdef DEBUG_UNASSIGNED
static const bool trace_unassigned = true;
#else
static const bool trace_unassigned = false;
#endif

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem read " TARGET_FMT_plx " %s\n",
                addr, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem write " TARGET_FMT_plx
                " = 0x%" PRIx64 " %s\n",
                addr, val, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        fprintf(stderr, "Misaligned i/o to address %08" HWADDR_PRIx
                " with size %u for memory region %s\n",
                addr, size, mr->name);
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
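
/* Usage sketch (illustrative, not part of the original file; all
 * "example_dev" names are hypothetical): a minimal MMIO device backing an
 * I/O region with read/write callbacks, wired up via
 * memory_region_init_io() above.
 */
typedef struct ExampleDevState {
    MemoryRegion iomem;
    uint32_t reg;
} ExampleDevState;

static inline uint64_t example_dev_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    ExampleDevState *s = opaque;
    /* One 32-bit register at offset 0; everything else reads as zero. */
    return addr == 0 ? s->reg : 0;
}

static inline void example_dev_write(void *opaque, hwaddr addr,
                                     uint64_t val, unsigned size)
{
    ExampleDevState *s = opaque;
    if (addr == 0) {
        s->reg = val;
    }
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 4,
    .impl.max_access_size = 4,
};

static inline void example_dev_realize(ExampleDevState *s, Object *owner)
{
    memory_region_init_io(&s->iomem, owner, &example_dev_ops, s,
                          "example-dev", 0x1000);
}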
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
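
/* Usage sketch (illustrative, not part of the original file; the function
 * name is hypothetical): an alias exposes a window into another region,
 * e.g. mirroring the second half of a 64 KiB ROM at another guest address.
 */
static inline void example_alias_rom_window(MemoryRegion *alias,
                                            Object *owner,
                                            MemoryRegion *rom)
{
    /* Guest accesses to @alias hit @rom starting at offset 0x8000. */
    memory_region_init_alias(alias, owner, "rom-hi", rom, 0x8000, 0x8000);
}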
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&mr->iommu_notify);
    mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it does not
     * have a container and cannot be a root either because it has no
     * references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA measurably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;

    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != mr->iommu_notify_flags &&
        mr->iommu_ops->notify_flag_changed) {
        mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
                                           flags);
    }

    mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(mr);
}

uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
{
    assert(memory_region_is_iommu(mr));
    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
        return mr->iommu_ops->get_min_page_size(mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
                                bool is_write)
{
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    granularity = memory_region_iommu_get_min_page_size(mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    memory_region_update_iommu_notify_flags(mr);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;
    IOMMUNotifierFlag request_flags;

    assert(memory_region_is_iommu(mr));

    if (entry.perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
        if (iommu_notifier->notifier_flags & request_flags) {
            iommu_notifier->notify(iommu_notifier, &entry);
        }
    }
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MemoryRegionSection mrs = section_from_flat_range(fr, as);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void memory_region_set_fd(MemoryRegion *mr, int fd)
{
    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    mr->ram_block->fd = fd;
    rcu_read_unlock();
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
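
/* Usage sketch (illustrative, not part of the original file; the function
 * name is hypothetical): a virtio-style doorbell can bind an EventNotifier
 * to a 2-byte write of a specific value, letting KVM signal the notifier
 * without a round trip through userspace MMIO dispatch.
 */
static inline void example_add_doorbell(MemoryRegion *mr,
                                        EventNotifier *notifier,
                                        hwaddr doorbell_offset,
                                        uint16_t queue_index)
{
    memory_region_add_eventfd(mr, doorbell_offset, 2,
                              true /* match_data */, queue_index, notifier);
}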
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
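
/* Usage sketch (illustrative, not part of the original file; the function
 * name is hypothetical): building a small hierarchy where an MMIO hole
 * takes precedence over the RAM it overlaps; render_memory_region()
 * resolves the conflict by priority.
 */
static inline void example_build_hierarchy(MemoryRegion *sysmem,
                                           MemoryRegion *ram,
                                           MemoryRegion *mmio_hole)
{
    memory_region_add_subregion(sysmem, 0x0, ram);              /* prio 0 */
    memory_region_add_subregion_overlap(sysmem, 0x1000,
                                        mmio_hole, 1);          /* prio 1 wins */
}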
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}
bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;

    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}
void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask) {
                MemoryRegionSection mrs = section_from_flat_range(fr, as);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}
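
/* Usage sketch (illustrative, not part of the original file; all "example"
 * names are hypothetical): a minimal MemoryListener that logs every section
 * added to its address space.  Only the callbacks of interest need to be
 * filled in; registering it replays existing ranges via
 * listener_add_address_space() above, e.g.:
 *
 *     memory_listener_register(&example_listener, &address_space_memory);
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    fprintf(stderr, "region_add: %s @ 0x%" HWADDR_PRIx "\n",
            memory_region_name(section->mr),
            section->offset_within_address_space);
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};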
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}

static void do_address_space_destroy(AddressSpace *as)
{
    bool do_free = as->malloced;

    address_space_destroy_dispatch(as);
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}

AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}
*as
)
2483 MemoryRegion
*root
= as
->root
;
2486 if (as
->ref_count
) {
2489 /* Flush out anything from MemoryListeners listening in on this */
2490 memory_region_transaction_begin();
2492 memory_region_transaction_commit();
2493 QTAILQ_REMOVE(&address_spaces
, as
, address_spaces_link
);
2494 address_space_unregister(as
);
2496 /* At this point, as->dispatch and as->current_map are dummy
2497 * entries that the guest should never use. Wait for the old
2498 * values to expire before freeing the data.
2501 call_rcu(as
, do_address_space_destroy
, rcu
);
2504 static const char *memory_region_type(MemoryRegion
*mr
)
2506 if (memory_region_is_ram_device(mr
)) {
2508 } else if (memory_region_is_romd(mr
)) {
2510 } else if (memory_region_is_rom(mr
)) {
2512 } else if (memory_region_is_ram(mr
)) {
2519 typedef struct MemoryRegionList MemoryRegionList
;
2521 struct MemoryRegionList
{
2522 const MemoryRegion
*mr
;
2523 QTAILQ_ENTRY(MemoryRegionList
) queue
;
2526 typedef QTAILQ_HEAD(queue
, MemoryRegionList
) MemoryRegionListHead
;
2528 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2529 int128_sub((size), int128_one())) : 0)
2530 #define MTREE_INDENT " "
2532 static void mtree_print_mr(fprintf_function mon_printf
, void *f
,
2533 const MemoryRegion
*mr
, unsigned int level
,
2535 MemoryRegionListHead
*alias_print_queue
)
2537 MemoryRegionList
*new_ml
, *ml
, *next_ml
;
2538 MemoryRegionListHead submr_print_queue
;
2539 const MemoryRegion
*submr
;
2541 hwaddr cur_start
, cur_end
;
2547 for (i
= 0; i
< level
; i
++) {
2548 mon_printf(f
, MTREE_INDENT
);
2551 cur_start
= base
+ mr
->addr
;
2552 cur_end
= cur_start
+ MR_SIZE(mr
->size
);
2555 * Try to detect overflow of memory region. This should never
2556 * happen normally. When it happens, we dump something to warn the
2557 * user who is observing this.
2559 if (cur_start
< base
|| cur_end
< cur_start
) {
2560 mon_printf(f
, "[DETECTED OVERFLOW!] ");
2564 MemoryRegionList
*ml
;
2567 /* check if the alias is already in the queue */
2568 QTAILQ_FOREACH(ml
, alias_print_queue
, queue
) {
2569 if (ml
->mr
== mr
->alias
) {
2575 ml
= g_new(MemoryRegionList
, 1);
2577 QTAILQ_INSERT_TAIL(alias_print_queue
, ml
, queue
);
2579 mon_printf(f
, TARGET_FMT_plx
"-" TARGET_FMT_plx
2580 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
2581 "-" TARGET_FMT_plx
"%s\n",
2584 memory_region_type((MemoryRegion
*)mr
),
2585 memory_region_name(mr
),
2586 memory_region_name(mr
->alias
),
2588 mr
->alias_offset
+ MR_SIZE(mr
->size
),
2589 mr
->enabled
? "" : " [disabled]");
2592 TARGET_FMT_plx
"-" TARGET_FMT_plx
" (prio %d, %s): %s%s\n",
2595 memory_region_type((MemoryRegion
*)mr
),
2596 memory_region_name(mr
),
2597 mr
->enabled
? "" : " [disabled]");
2600 QTAILQ_INIT(&submr_print_queue
);
2602 QTAILQ_FOREACH(submr
, &mr
->subregions
, subregions_link
) {
2603 new_ml
= g_new(MemoryRegionList
, 1);
2605 QTAILQ_FOREACH(ml
, &submr_print_queue
, queue
) {
2606 if (new_ml
->mr
->addr
< ml
->mr
->addr
||
2607 (new_ml
->mr
->addr
== ml
->mr
->addr
&&
2608 new_ml
->mr
->priority
> ml
->mr
->priority
)) {
2609 QTAILQ_INSERT_BEFORE(ml
, new_ml
, queue
);
2615 QTAILQ_INSERT_TAIL(&submr_print_queue
, new_ml
, queue
);
2619 QTAILQ_FOREACH(ml
, &submr_print_queue
, queue
) {
2620 mtree_print_mr(mon_printf
, f
, ml
->mr
, level
+ 1, cur_start
,
2624 QTAILQ_FOREACH_SAFE(ml
, &submr_print_queue
, queue
, next_ml
) {
2629 static void mtree_print_flatview(fprintf_function p
, void *f
,
2632 FlatView
*view
= address_space_get_flatview(as
);
2633 FlatRange
*range
= &view
->ranges
[0];
2638 p(f
, MTREE_INDENT
"No rendered FlatView for "
2639 "address space '%s'\n", as
->name
);
2640 flatview_unref(view
);
2646 if (range
->offset_in_region
) {
2647 p(f
, MTREE_INDENT TARGET_FMT_plx
"-"
2648 TARGET_FMT_plx
" (prio %d, %s): %s @" TARGET_FMT_plx
"\n",
2649 int128_get64(range
->addr
.start
),
2650 int128_get64(range
->addr
.start
) + MR_SIZE(range
->addr
.size
),
2652 range
->readonly
? "rom" : memory_region_type(mr
),
2653 memory_region_name(mr
),
2654 range
->offset_in_region
);
2656 p(f
, MTREE_INDENT TARGET_FMT_plx
"-"
2657 TARGET_FMT_plx
" (prio %d, %s): %s\n",
2658 int128_get64(range
->addr
.start
),
2659 int128_get64(range
->addr
.start
) + MR_SIZE(range
->addr
.size
),
2661 range
->readonly
? "rom" : memory_region_type(mr
),
2662 memory_region_name(mr
));
2667 flatview_unref(view
);
2670 void mtree_info(fprintf_function mon_printf
, void *f
, bool flatview
)
2672 MemoryRegionListHead ml_head
;
2673 MemoryRegionList
*ml
, *ml2
;
2677 QTAILQ_FOREACH(as
, &address_spaces
, address_spaces_link
) {
2678 mon_printf(f
, "address-space (flat view): %s\n", as
->name
);
2679 mtree_print_flatview(mon_printf
, f
, as
);
2680 mon_printf(f
, "\n");
2685 QTAILQ_INIT(&ml_head
);
2687 QTAILQ_FOREACH(as
, &address_spaces
, address_spaces_link
) {
2688 mon_printf(f
, "address-space: %s\n", as
->name
);
2689 mtree_print_mr(mon_printf
, f
, as
->root
, 1, 0, &ml_head
);
2690 mon_printf(f
, "\n");
2693 /* print aliased regions */
2694 QTAILQ_FOREACH(ml
, &ml_head
, queue
) {
2695 mon_printf(f
, "memory-region: %s\n", memory_region_name(ml
->mr
));
2696 mtree_print_mr(mon_printf
, f
, ml
->mr
, 1, 0, &ml_head
);
2697 mon_printf(f
, "\n");
2700 QTAILQ_FOREACH_SAFE(ml
, &ml_head
, queue
, ml2
) {
2705 static const TypeInfo memory_region_info
= {
2706 .parent
= TYPE_OBJECT
,
2707 .name
= TYPE_MEMORY_REGION
,
2708 .instance_size
= sizeof(MemoryRegion
),
2709 .instance_init
= memory_region_initfn
,
2710 .instance_finalize
= memory_region_finalize
,
2713 static void memory_register_types(void)
2715 type_register_static(&memory_region_info
);
2718 type_init(memory_register_types
)