/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "exec/exec-all.h" /* qemu_sprint_backtrace */
#include "exec/memory.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "sysemu/sysemu.h" /* trace_unassigned */
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "qemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
#include "exec/address-spaces.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
unsigned int global_dirty_tracking;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;
/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
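
/*
 * Editor's illustration, not part of the original file: how the AddrRange
 * helpers above compose.  Two overlapping ranges [0x1000, 0x2000) and
 * [0x1800, 0x2800) intersect in [0x1800, 0x2000): the start is the maximum
 * of the two starts, the end is the minimum of the two ends.  The
 * MEMORY_EXAMPLES guard is hypothetical and never defined in any build.
 */
#ifdef MEMORY_EXAMPLES
static void addrrange_example(void)
{
    AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x1000));
    AddrRange b = addrrange_make(int128_make64(0x1800), int128_make64(0x1000));

    /* b.start falls inside a, so the two ranges intersect. */
    assert(addrrange_intersects(a, b));

    AddrRange i = addrrange_intersection(a, b);
    assert(int128_get64(i.start) == 0x1800); /* max of the two starts */
    assert(int128_get64(i.size) == 0x800);   /* min end minus max start */
}
#endif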
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e))))
        return true;

    return false;
}
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
        .unmergeable = fr->unmergeable,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile
        && a->unmergeable == b->unmergeable;
}
static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile
        && !r1->unmergeable && !r2->unmergeable;
}
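
/*
 * Editor's illustration, not part of the original file: two FlatRanges can
 * merge only when they are contiguous both in the address space and within
 * the backing MemoryRegion, with identical attributes.  flatview_simplify()
 * below performs exactly this folding.  The MEMORY_EXAMPLES guard is
 * hypothetical.
 */
#ifdef MEMORY_EXAMPLES
static void can_merge_example(MemoryRegion *mr)
{
    FlatRange r1 = {
        .mr = mr,
        .offset_in_region = 0,
        .addr = addrrange_make(int128_zero(), int128_make64(0x1000)),
    };
    FlatRange r2 = r1;

    /* Directly after r1 in the address space and in the region. */
    r2.offset_in_region = 0x1000;
    r2.addr = addrrange_make(int128_make64(0x1000), int128_make64(0x1000));

    assert(can_merge(&r1, &r2)); /* would fold into one [0x0, 0x2000) range */
}
#endif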
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#if TARGET_BIG_ENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}
static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
                                     memory_region_name(mr));
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
                                      memory_region_name(mr));
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;
    bool reentrancy_guard_applied = false;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* Do not allow more than one simultaneous access to a device's IO Regions */
    if (mr->dev && !mr->disable_reentrancy_guard &&
        !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
        if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
            warn_report_once("Blocked re-entrant IO on MemoryRegion: "
                             "%s at addr: 0x%" HWADDR_PRIX,
                             memory_region_name(mr), addr);
            return MEMTX_ACCESS_ERROR;
        }
        mr->dev->mem_reentrancy_guard.engaged_in_io = true;
        reentrancy_guard_applied = true;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    if (mr->dev && reentrancy_guard_applied) {
        mr->dev->mem_reentrancy_guard.engaged_in_io = false;
    }
    return r;
}
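
/*
 * Editor's illustration, not part of the original file: a device whose
 * implementation handles at most 2-byte accesses forces the splitting
 * above.  A 4-byte little-endian read at offset 0 is issued as two 2-byte
 * reads with shift 0 and shift 16; on a big-endian device the shifts are
 * reversed so the first (most significant) sub-access lands in the high
 * bits.  "example16_read" and the MEMORY_EXAMPLES guard are hypothetical.
 */
#ifdef MEMORY_EXAMPLES
static uint64_t example16_read(void *opaque, hwaddr addr, unsigned size)
{
    uint16_t *regs = opaque;     /* backing store for 16-bit registers */
    return regs[addr / 2];
}

static const MemoryRegionOps example16_ops = {
    .read = example16_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 2,    /* wider guest accesses get split above */
    },
};
#endif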
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile,
                                 bool unmergeable)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;
    unmergeable |= mr->unmergeable;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile, unmergeable);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile, unmergeable);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;
    fr.unmergeable = unmergeable;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr,
               fr->offset_in_region, opaque)) {
            break;
        }
    }
}
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    if (!as->ioeventfd_notifiers) {
        return;
    }

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                                           ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}
void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(bql_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
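
/*
 * Editor's illustration, not part of the original file: callers batch
 * several topology changes in one transaction so that flat views are
 * rebuilt and listeners notified once, at the final commit, instead of
 * once per change.  The region names and the MEMORY_EXAMPLES guard are
 * hypothetical.
 */
#ifdef MEMORY_EXAMPLES
static void transaction_example(MemoryRegion *sysbus_mr,
                                MemoryRegion *rom_mr, MemoryRegion *ram_mr)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(rom_mr, false);
    memory_region_add_subregion(sysbus_mr, 0x0, ram_mr);
    /* Flat views are regenerated and listeners called only here. */
    memory_region_transaction_commit();
}
#endif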
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
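
/*
 * Editor's illustration, not part of the original file: characters special
 * to QOM paths are expanded to a literal "\xNN" sequence, which is why an
 * escaped character costs 4 bytes in the sizing loop above.  The
 * MEMORY_EXAMPLES guard is hypothetical.
 */
#ifdef MEMORY_EXAMPLES
static void escape_example(void)
{
    char *escaped = memory_region_escape_name("pci[0]");

    /* '[' is 0x5b and ']' is 0x5d; each becomes the 4 characters \x5b/\x5d. */
    assert(strcmp(escaped, "pci\\x5b0\\x5d") == 0);
    g_free(escaped);
}
#endif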
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}
static int qemu_target_backtrace(target_ulong *array, size_t size)
{
    int n = 0;
    if (size >= 2) {
#if defined(TARGET_ARM)
        CPUArchState *env = cpu_env(current_cpu);
        array[0] = env->regs[15];
        array[1] = env->regs[14];
#elif defined(TARGET_MIPS)
        CPUArchState *env = cpu_env(current_cpu);
        array[0] = env->active_tc.PC;
        array[1] = env->active_tc.gpr[31];
#else
        array[0] = 0;
        array[1] = 0;
#endif
        n = 2;
    }
    return n;
}

#include "disas/disas.h"
const char *qemu_sprint_backtrace(char *buffer, size_t length)
{
    char *p = buffer;
    if (current_cpu) {
        target_ulong caller[2];
        const char *symbol;

        qemu_target_backtrace(caller, 2);
        symbol = lookup_symbol(caller[0]);
        p += sprintf(p, "[%s]", symbol);
        symbol = lookup_symbol(caller[1]);
        p += sprintf(p, "[%s]", symbol);
    } else {
        p += sprintf(p, "[cpu not running]");
    }
    assert((p - buffer) < length);
    return buffer;
}
*obj
)
1346 MemoryRegion
*mr
= MEMORY_REGION(obj
);
1348 mr
->is_iommu
= true;
1351 static uint64_t unassigned_mem_read(void *opaque
, hwaddr addr
,
1354 if (trace_unassigned
) {
1356 printf("Unassigned mem read " HWADDR_FMT_plx
" %s\n",
1357 addr
, qemu_sprint_backtrace(buffer
, sizeof(buffer
)));
1363 static void unassigned_mem_write(void *opaque
, hwaddr addr
,
1364 uint64_t val
, unsigned size
)
1366 if (trace_unassigned
) {
1368 printf("Unassigned mem write " HWADDR_FMT_plx
1369 " = 0x%" PRIx64
" %s\n",
1370 addr
, val
, qemu_sprint_backtrace(buffer
, sizeof(buffer
)));
1374 static bool unassigned_mem_accepts(void *opaque
, hwaddr addr
,
1375 unsigned size
, bool is_write
,
1381 const MemoryRegionOps unassigned_mem_ops
= {
1382 .valid
.accepts
= unassigned_mem_accepts
,
1383 .endianness
= DEVICE_NATIVE_ENDIAN
,
1386 static uint64_t memory_region_ram_device_read(void *opaque
,
1387 hwaddr addr
, unsigned size
)
1389 MemoryRegion
*mr
= opaque
;
1390 uint64_t data
= ldn_he_p(mr
->ram_block
->host
+ addr
, size
);
1392 trace_memory_region_ram_device_read(get_cpu_index(), mr
, addr
, data
, size
);
1397 static void memory_region_ram_device_write(void *opaque
, hwaddr addr
,
1398 uint64_t data
, unsigned size
)
1400 MemoryRegion
*mr
= opaque
;
1402 trace_memory_region_ram_device_write(get_cpu_index(), mr
, addr
, data
, size
);
1404 stn_he_p(mr
->ram_block
->host
+ addr
, size
, data
);
1407 static const MemoryRegionOps ram_device_mem_ops
= {
1408 .read
= memory_region_ram_device_read
,
1409 .write
= memory_region_ram_device_write
,
1410 .endianness
= DEVICE_HOST_ENDIAN
,
1412 .min_access_size
= 1,
1413 .max_access_size
= 8,
1417 .min_access_size
= 1,
1418 .max_access_size
= 8,
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: rejected\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: unaligned\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr));
        return false;
    }

    /* Treat a max_access_size of zero as "all sizes valid", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
                      ", size %u, region '%s', reason: invalid size "
                      "(min:%u max:%u)\n",
                      is_write ? "write" : "read",
                      addr, size, memory_region_name(mr),
                      mr->ops->valid.min_access_size,
                      mr->ops->valid.max_access_size);
        return false;
    }

    return true;
}
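
/*
 * Editor's illustration, not part of the original file: a device model
 * typically expresses its access constraints through .valid, which the
 * checks above enforce; e.g. accepting only aligned 4-byte accesses.  The
 * MEMORY_EXAMPLES guard is hypothetical.
 */
#ifdef MEMORY_EXAMPLES
static const MemoryRegionOps example_strict_ops = {
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,  /* unaligned accesses are rejected above */
    },
};
#endif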
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (mr->alias) {
        return memory_region_dispatch_read(mr->alias,
                                           mr->alias_offset + addr,
                                           pval, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (mr->alias) {
        return memory_region_dispatch_write(mr->alias,
                                            mr->alias_offset + addr,
                                            data, op, attrs);
    }
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    /*
     * FIXME: it's not clear why under KVM the write would be processed
     * directly, instead of going through eventfd.  This probably should
     * test "tcg_enabled() || qtest_enabled()", or should just go away.
     */
    if (!kvm_enabled() &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
*mr
,
1603 return memory_region_init_ram_flags_nomigrate(mr
, owner
, name
,
1607 bool memory_region_init_ram_flags_nomigrate(MemoryRegion
*mr
,
1615 memory_region_init(mr
, owner
, name
, size
);
1617 mr
->terminates
= true;
1618 mr
->destructor
= memory_region_destructor_ram
;
1619 mr
->ram_block
= qemu_ram_alloc(size
, ram_flags
, mr
, &err
);
1621 mr
->size
= int128_zero();
1622 object_unparent(OBJECT(mr
));
1623 error_propagate(errp
, err
);
1629 bool memory_region_init_resizeable_ram(MemoryRegion
*mr
,
1634 void (*resized
)(const char*,
1640 memory_region_init(mr
, owner
, name
, size
);
1642 mr
->terminates
= true;
1643 mr
->destructor
= memory_region_destructor_ram
;
1644 mr
->ram_block
= qemu_ram_alloc_resizeable(size
, max_size
, resized
,
1647 mr
->size
= int128_zero();
1648 object_unparent(OBJECT(mr
));
1649 error_propagate(errp
, err
);
1656 bool memory_region_init_ram_from_file(MemoryRegion
*mr
,
1667 memory_region_init(mr
, owner
, name
, size
);
1669 mr
->readonly
= !!(ram_flags
& RAM_READONLY
);
1670 mr
->terminates
= true;
1671 mr
->destructor
= memory_region_destructor_ram
;
1673 mr
->ram_block
= qemu_ram_alloc_from_file(size
, mr
, ram_flags
, path
,
1676 mr
->size
= int128_zero();
1677 object_unparent(OBJECT(mr
));
1678 error_propagate(errp
, err
);
1684 bool memory_region_init_ram_from_fd(MemoryRegion
*mr
,
1694 memory_region_init(mr
, owner
, name
, size
);
1696 mr
->readonly
= !!(ram_flags
& RAM_READONLY
);
1697 mr
->terminates
= true;
1698 mr
->destructor
= memory_region_destructor_ram
;
1699 mr
->ram_block
= qemu_ram_alloc_from_fd(size
, mr
, ram_flags
, fd
, offset
,
1702 mr
->size
= int128_zero();
1703 object_unparent(OBJECT(mr
));
1704 error_propagate(errp
, err
);
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_abort);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
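
/*
 * Editor's illustration, not part of the original file: an alias exposes a
 * window of another region at a different address, as many boards do for
 * mirrored RAM.  The region names, offsets, and the MEMORY_EXAMPLES guard
 * are hypothetical.
 */
#ifdef MEMORY_EXAMPLES
static void alias_example(MemoryRegion *system, MemoryRegion *ram)
{
    static MemoryRegion mirror;

    /* Expose the second megabyte of "ram" again at 0x80000000. */
    memory_region_init_alias(&mirror, NULL, "ram-mirror", ram,
                             0x100000, 0x100000);
    memory_region_add_subregion(system, 0x80000000, &mirror);
}
#endif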
bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    if (!memory_region_init_ram_flags_nomigrate(mr, owner, name,
                                                size, 0, errp)) {
        return false;
    }
    mr->readonly = true;

    return true;
}

bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
        return false;
    }
    return true;
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it does not
     * have a container and cannot be a root either, because it has no
     * references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            g_strdup(object_get_canonical_path_component(OBJECT(mr)));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

bool memory_region_is_protected(MemoryRegion *mr)
{
    return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
}

bool memory_region_has_guest_memfd(MemoryRegion *mr)
{
    return mr->ram_block && mr->ram_block->guest_memfd >= 0;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    RAMBlock *rb = mr->ram_block;

    if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
                                  memory_region_is_iommu(mr))) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }

    if (tcg_enabled() && rb) {
        /* TCG only cares about dirty memory logging for RAM, not IOMMU.  */
        mask |= (1 << DIRTY_MEMORY_CODE);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
                                                   Error **errp)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        ret = imrc->notify_flag_changed(iommu_mr,
                                        iommu_mr->iommu_notify_flags,
                                        flags, errp);
    }

    if (!ret) {
        iommu_mr->iommu_notify_flags = flags;
    }
    return ret;
}

int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp)
{
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    if (mr->alias) {
        return memory_region_register_iommu_notifier(mr->alias, n, errp);
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
    if (ret) {
        QLIST_REMOVE(n, node);
    }
    return ret;
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    const IOMMUTLBEvent *event)
{
    const IOMMUTLBEntry *entry = &event->entry;
    hwaddr entry_end = entry->iova + entry->addr_mask;
    IOMMUTLBEntry tmp = *entry;

    if (event->type == IOMMU_NOTIFIER_UNMAP) {
        assert(entry->perm == IOMMU_NONE);
    }

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        /* Crop (iova, addr_mask) to range */
        tmp.iova = MAX(tmp.iova, notifier->start);
        tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
    } else {
        assert(entry->iova >= notifier->start && entry_end <= notifier->end);
    }

    if (event->type & notifier->notifier_flags) {
        notifier->notify(notifier, &tmp);
    }
}

void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
{
    IOMMUTLBEvent event;

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = notifier->start;
    event.entry.perm = IOMMU_NONE;
    event.entry.addr_mask = notifier->end - notifier->start;

    memory_region_notify_iommu_one(notifier, &event);
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                const IOMMUTLBEvent event)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_iommu_one(iommu_notifier, &event);
        }
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}
RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
{
    if (!memory_region_is_ram(mr)) {
        return NULL;
    }
    return mr->rdm;
}

void memory_region_set_ram_discard_manager(MemoryRegion *mr,
                                           RamDiscardManager *rdm)
{
    g_assert(memory_region_is_ram(mr));
    g_assert(!rdm || !mr->rdm);
    mr->rdm = rdm;
}

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->get_min_granularity);
    return rdmc->get_min_granularity(rdm, mr);
}

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->is_populated);
    return rdmc->is_populated(rdm, section);
}

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_populated);
    return rdmc->replay_populated(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->replay_discarded);
    rdmc->replay_discarded(rdm, section, replay_fn, opaque);
}

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->register_listener);
    rdmc->register_listener(rdm, rdl, section);
}

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl)
{
    RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);

    g_assert(rdmc->unregister_listener);
    rdmc->unregister_listener(rdm, rdl);
}
/* Called with rcu_read_lock held.  */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager, Error **errp)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    if (mr_has_discard_manager) {
        *mr_has_discard_manager = false;
    }
    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target.  We need to translate
     * it the rest of the way through to memory.
     */
    mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
                                 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_setg(errp, "iommu map to non memory area %" HWADDR_PRIx "", xlat);
        return false;
    } else if (memory_region_has_ram_discard_manager(mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
        MemoryRegionSection tmp = {
            .mr = mr,
            .offset_within_region = xlat,
            .size = int128_make64(len),
        };
        if (mr_has_discard_manager) {
            *mr_has_discard_manager = true;
        }
        /*
         * Malicious VMs can map memory into the IOMMU, which is expected
         * to remain discarded.  vfio will pin all pages, populating memory.
         * Disallow that.  vmstate priorities make sure any RamDiscardManager
         * state has already been restored before IOMMUs are restored.
         */
        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
            error_setg(errp, "iommu map to discarded memory (e.g., unplugged"
                       " via virtio-mem): %" HWADDR_PRIx "",
                       iotlb->translated_addr);
            return false;
        }
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_setg(errp, "iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }

    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }

    if (read_only) {
        *read_only = !writable || mr->readonly;
    }

    return true;
}
*mr
, bool log
, unsigned client
)
2274 uint8_t mask
= 1 << client
;
2275 uint8_t old_logging
;
2277 assert(client
== DIRTY_MEMORY_VGA
);
2278 old_logging
= mr
->vga_logging_count
;
2279 mr
->vga_logging_count
+= log
? 1 : -1;
2280 if (!!old_logging
== !!mr
->vga_logging_count
) {
2284 memory_region_transaction_begin();
2285 mr
->dirty_log_mask
= (mr
->dirty_log_mask
& ~mask
) | (log
* mask
);
2286 memory_region_update_pending
|= mr
->enabled
;
2287 memory_region_transaction_commit();
2290 void memory_region_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2293 assert(mr
->ram_block
);
2294 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr
) + addr
,
2296 memory_region_get_dirty_log_mask(mr
));
2300 * If memory region `mr' is NULL, do global sync. Otherwise, sync
2301 * dirty bitmap for the specified memory region.
2303 static void memory_region_sync_dirty_bitmap(MemoryRegion
*mr
, bool last_stage
)
2305 MemoryListener
*listener
;
2310 /* If the same address space has multiple log_sync listeners, we
2311 * visit that address space's FlatView multiple times. But because
2312 * log_sync listeners are rare, it's still cheaper than walking each
2313 * address space once.
2315 QTAILQ_FOREACH(listener
, &memory_listeners
, link
) {
2316 if (listener
->log_sync
) {
2317 as
= listener
->address_space
;
2318 view
= address_space_get_flatview(as
);
2319 FOR_EACH_FLAT_RANGE(fr
, view
) {
2320 if (fr
->dirty_log_mask
&& (!mr
|| fr
->mr
== mr
)) {
2321 MemoryRegionSection mrs
= section_from_flat_range(fr
, view
);
2322 listener
->log_sync(listener
, &mrs
);
2325 flatview_unref(view
);
2326 trace_memory_region_sync_dirty(mr
? mr
->name
: "(all)", listener
->name
, 0);
2327 } else if (listener
->log_sync_global
) {
2329 * No matter whether MR is specified, what we can do here
2330 * is to do a global sync, because we are not capable to
2331 * sync in a finer granularity.
2333 listener
->log_sync_global(listener
, last_stage
);
2334 trace_memory_region_sync_dirty(mr
? mr
->name
: "(all)", listener
->name
, 1);
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len)
{
    MemoryRegionSection mrs;
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;
    hwaddr sec_start, sec_end, sec_size;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_clear) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (!fr->dirty_log_mask || fr->mr != mr) {
                /*
                 * Clear dirty bitmap operation only applies to those
                 * regions whose dirty logging is at least enabled
                 */
                continue;
            }

            mrs = section_from_flat_range(fr, view);

            sec_start = MAX(mrs.offset_within_region, start);
            sec_end = mrs.offset_within_region + int128_get64(mrs.size);
            sec_end = MIN(sec_end, start + len);

            if (sec_start >= sec_end) {
                /*
                 * If this memory region section has no intersection
                 * with the requested range, skip.
                 */
                continue;
            }

            /* Valid case; shrink the section if needed */
            mrs.offset_within_address_space +=
                sec_start - mrs.offset_within_region;
            mrs.offset_within_region = sec_start;
            sec_size = sec_end - sec_start;
            mrs.size = int128_make64(sec_size);
            listener->log_clear(listener, &mrs);
        }
        flatview_unref(view);
    }
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    DirtyBitmapSnapshot *snapshot;
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr, false);
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    memory_global_after_dirty_log_sync();
    return snapshot;
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        mr = mr->alias;
    }
    return mr->ram_block->fd;
}
2458 void *memory_region_get_ram_ptr(MemoryRegion
*mr
)
2460 uint64_t offset
= 0;
2462 RCU_READ_LOCK_GUARD();
2464 offset
+= mr
->alias_offset
;
2467 assert(mr
->ram_block
);
2468 return qemu_map_ram_ptr(mr
->ram_block
, offset
);
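/*
 * Note the alias walk above accumulates offsets across chained aliases.
 * E.g. (illustrative): if region A aliases B at alias_offset 0x1000 and
 * B aliases RAM region C at alias_offset 0x2000, then
 * memory_region_get_ram_ptr(A) returns the host pointer into C's
 * RAMBlock at offset 0x3000.
 */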
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    if (mr->ram_block) {
        qemu_ram_msync(mr->ram_block, addr, size);
    }
}

void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * This might need to be extended to cover other
     * types of memory regions.
     */
    if (mr->dirty_log_mask) {
        memory_region_msync(mr, addr, size);
    }
}
/*
 * Notify the proper memory listeners about the change to a newly
 * added/removed CoalescedMemoryRange.
 */
static void memory_region_update_coalesced_range(MemoryRegion *mr,
                                                 CoalescedMemoryRange *cmr,
                                                 bool add)
{
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                flat_range_coalesced_io_notify(fr, as, cmr, add);
            }
        }
        flatview_unref(view);
    }
}
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr, cmr, true);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        memory_region_update_coalesced_range(mr, cmr, false);
        g_free(cmr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}
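/*
 * Usage sketch (illustrative; the device fields and ops are made up): a
 * device whose MMIO writes have no immediate side effects can mark its
 * window as coalesced so the accelerator may batch guest writes instead
 * of exiting on every access:
 *
 *     memory_region_init_io(&s->fb_ctrl, OBJECT(s), &fb_ops, s,
 *                           "fb-ctrl", 0x1000);
 *     memory_region_set_coalescing(&s->fb_ctrl);
 *
 * The flush_coalesced_mmio flag set above ensures batched writes are
 * flushed before other accesses to the region are dispatched.
 */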
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
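/*
 * Example (a sketch; the device fields are hypothetical): wire a doorbell
 * so that a guest 2-byte write of the value 1 at offset 0x10 is handled
 * entirely through an eventfd, with no exit to device emulation:
 *
 *     EventNotifier *e = &s->doorbell_notifier;
 *     event_notifier_init(e, 0);
 *     memory_region_add_eventfd(&s->mmio, 0x10, 2, true, 1, e);
 *
 * The matching memory_region_del_eventfd() below must be called with the
 * exact same addr/size/match_data/data tuple, or its assert will fire.
 */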
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *alias;

    assert(!subregion->container);
    subregion->container = mr;
    for (alias = subregion->alias; alias; alias = alias->alias) {
        alias->mapped_via_alias++;
    }
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
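/*
 * Example of overlap priorities (illustrative; the regions are made up):
 * map RAM at the default priority 0 and punch an MMIO hole into it at a
 * higher priority; where they overlap, the higher priority wins when the
 * flat view is rendered:
 *
 *     memory_region_add_subregion(sysmem, 0x0, &s->ram);
 *     memory_region_add_subregion_overlap(sysmem, 0x8000, &s->mmio, 1);
 */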
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    MemoryRegion *alias;

    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    for (alias = subregion->alias; alias; alias = alias->alias) {
        alias->mapped_via_alias--;
        assert(alias->mapped_via_alias >= 0);
    }
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        memory_region_add_subregion_common(container, mr->addr, mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable)
{
    if (unmergeable == mr->unmergeable) {
        return;
    }

    memory_region_transaction_begin();
    mr->unmergeable = unmergeable;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return !!mr->container || mr->mapped_via_alias;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    RCU_READ_LOCK_GUARD();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    return ret;
}
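/*
 * Usage sketch (illustrative): look up what backs a guest physical
 * address in the tree rooted at "root"; the returned section holds a
 * reference on its region, which the caller must drop:
 *
 *     MemoryRegionSection sec = memory_region_find(root, 0x1000, 4);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */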
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
{
    MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);

    *tmp = *s;
    if (tmp->mr) {
        memory_region_ref(tmp->mr);
    }
    if (tmp->fv) {
        bool ret = flatview_ref(tmp->fv);

        g_assert(ret);
    }
    return tmp;
}

void memory_region_section_free_copy(MemoryRegionSection *s)
{
    if (s->fv) {
        flatview_unref(s->fv);
    }
    if (s->mr) {
        memory_region_unref(s->mr);
    }
    g_free(s);
}
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    RCU_READ_LOCK_GUARD();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    return mr && mr != container;
}
void memory_global_dirty_log_sync(bool last_stage)
{
    memory_region_sync_dirty_bitmap(NULL, last_stage);
}

void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}
/*
 * Dirty track stop flags that are postponed due to the VM being stopped.
 * Should only be used within the vmstate_change hook.
 */
static unsigned int postponed_stop_flags;
static VMChangeStateEntry *vmstate_change;
static void memory_global_dirty_log_stop_postponed_run(void);

static bool memory_global_dirty_log_do_start(Error **errp)
{
    MemoryListener *listener;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (listener->log_global_start) {
            if (!listener->log_global_start(listener, errp)) {
                goto err;
            }
        }
    }
    return true;

err:
    while ((listener = QTAILQ_PREV(listener, link)) != NULL) {
        if (listener->log_global_stop) {
            listener->log_global_stop(listener);
        }
    }

    return false;
}
bool memory_global_dirty_log_start(unsigned int flags, Error **errp)
{
    unsigned int old_flags;

    assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));

    if (vmstate_change) {
        /* If there is postponed stop(), operate on it first */
        postponed_stop_flags &= ~flags;
        memory_global_dirty_log_stop_postponed_run();
    }

    flags &= ~global_dirty_tracking;
    if (!flags) {
        return true;
    }

    old_flags = global_dirty_tracking;
    global_dirty_tracking |= flags;
    trace_global_dirty_changed(global_dirty_tracking);

    if (!old_flags) {
        if (!memory_global_dirty_log_do_start(errp)) {
            global_dirty_tracking &= ~flags;
            trace_global_dirty_changed(global_dirty_tracking);
            return false;
        }

        memory_region_transaction_begin();
        memory_region_update_pending = true;
        memory_region_transaction_commit();
    }
    return true;
}
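/*
 * Sketch of a typical caller (flag choice illustrative): migration-style
 * code enables dirty tracking before iterating RAM and disables it again
 * when done or on failure:
 *
 *     Error *err = NULL;
 *     if (!memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, &err)) {
 *         error_report_err(err);
 *         return;
 *     }
 *     ...
 *     memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
 */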
static void memory_global_dirty_log_do_stop(unsigned int flags)
{
    assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
    assert((global_dirty_tracking & flags) == flags);
    global_dirty_tracking &= ~flags;

    trace_global_dirty_changed(global_dirty_tracking);

    if (!global_dirty_tracking) {
        memory_region_transaction_begin();
        memory_region_update_pending = true;
        memory_region_transaction_commit();
        MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
    }
}

/*
 * Execute the postponed dirty log stop operations, if any, then reset
 * everything (including the flags and the vmstate change hook).
 */
static void memory_global_dirty_log_stop_postponed_run(void)
{
    /* This must be called with the vmstate handler registered */
    assert(vmstate_change);

    /* Note: postponed_stop_flags can be cleared in log start routine */
    if (postponed_stop_flags) {
        memory_global_dirty_log_do_stop(postponed_stop_flags);
        postponed_stop_flags = 0;
    }

    qemu_del_vm_change_state_handler(vmstate_change);
    vmstate_change = NULL;
}

static void memory_vm_change_state_handler(void *opaque, bool running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_stop_postponed_run();
    }
}

void memory_global_dirty_log_stop(unsigned int flags)
{
    if (!runstate_is_running()) {
        /* Postpone the dirty log stop, e.g., to when VM starts again */
        if (vmstate_change) {
            /* Batch with previous postponed flags */
            postponed_stop_flags |= flags;
        } else {
            postponed_stop_flags = flags;
            vmstate_change = qemu_add_vm_change_state_handler(
                memory_vm_change_state_handler, NULL);
        }
        return;
    }

    memory_global_dirty_log_do_stop(flags);
}
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_tracking) {
        /*
         * Currently only VFIO can fail log_global_start(), and it's not
         * yet allowed to hotplug any PCI device during migration. So this
         * should never fail when invoked, guard it with error_abort.  If
         * it can start to fail in the future, we need to be able to fail
         * the whole listener_add_address_space() and its callers.
         */
        if (listener->log_global_start) {
            listener->log_global_start(listener, &error_abort);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    /* Only one of them can be defined for a listener */
    assert(!(listener->log_sync && listener->log_sync_global));

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);

    if (listener->eventfd_add || listener->eventfd_del) {
        as->ioeventfd_notifiers++;
    }
}
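/*
 * Minimal listener sketch (illustrative; the callback, name and priority
 * are made up): log every section added to the system address space.
 *
 *     static void my_region_add(MemoryListener *l,
 *                               MemoryRegionSection *section)
 *     {
 *         qemu_printf("add: %s\n", memory_region_name(section->mr));
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-listener",
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * Registration immediately replays the current flat view through
 * region_add via listener_add_address_space() above.
 */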
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    if (listener->eventfd_add || listener->eventfd_del) {
        listener->address_space->ioeventfd_notifiers--;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->bounce.in_use = false;
    qemu_mutex_init(&as->map_client_list_lock);
    QLIST_INIT(&as->map_client_list);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
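/*
 * Usage sketch (names hypothetical): a device needing its own DMA view
 * of memory can wrap a root region in a private address space:
 *
 *     memory_region_init(&s->dma_root, OBJECT(s), "dma-root", UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "my-dev-dma");
 *     ...
 *     address_space_destroy(&s->dma_as);
 */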
static void do_address_space_destroy(AddressSpace *as)
{
    assert(!qatomic_read(&as->bounce.in_use));
    assert(QLIST_EMPTY(&as->map_client_list));
    qemu_mutex_destroy(&as->map_client_list_lock);

    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
static const char *memory_region_type(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_type(mr->alias);
    }
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        char *canonical_path = object_get_canonical_path(obj);

        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}
static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner, bool display_disabled)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx
                        "-" HWADDR_FMT_plx "%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        memory_region_name(mr->alias),
                        mr->alias_offset,
                        mr->alias_offset + MR_SIZE(mr->size),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    } else {
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner, display_disabled);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}
struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s @" HWADDR_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace*, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac->name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

static void mtree_info_flatview(bool dispatch_tree, bool owner)
{
    struct FlatViewInfo fvi = {
        .counter = 0,
        .dispatch_tree = dispatch_tree,
        .owner = owner,
    };
    AddressSpace *as;
    FlatView *view;
    GArray *fv_address_spaces;
    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
    AccelClass *ac = ACCEL_GET_CLASS(current_accel());

    if (ac->has_memory) {
        fvi.ac = ac;
    }

    /* Gather all FVs in one table */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);

        fv_address_spaces = g_hash_table_lookup(views, view);
        if (!fv_address_spaces) {
            fv_address_spaces = g_array_new(false, false, sizeof(as));
            g_hash_table_insert(views, view, fv_address_spaces);
        }

        g_array_append_val(fv_address_spaces, as);
    }

    /* Print */
    g_hash_table_foreach(views, mtree_print_flatview, &fvi);

    /* Free */
    g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
    g_hash_table_unref(views);
}
struct AddressSpaceInfo {
    MemoryRegionListHead *ml_head;
    bool owner;
    bool disabled;
};

/* Returns negative value if a < b; zero if a = b; positive value if a > b. */
static gint address_space_compare_name(gconstpointer a, gconstpointer b)
{
    const AddressSpace *as_a = a;
    const AddressSpace *as_b = b;

    return g_strcmp0(as_a->name, as_b->name);
}

static void mtree_print_as_name(gpointer data, gpointer user_data)
{
    AddressSpace *as = data;

    qemu_printf("address-space: %s\n", as->name);
}

static void mtree_print_as(gpointer key, gpointer value, gpointer user_data)
{
    MemoryRegion *mr = key;
    GSList *as_same_root_mr_list = value;
    struct AddressSpaceInfo *asi = user_data;

    g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL);
    mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled);
    qemu_printf("\n");
}

static gboolean mtree_info_as_free(gpointer key, gpointer value,
                                   gpointer user_data)
{
    GSList *as_same_root_mr_list = value;

    g_slist_free(as_same_root_mr_list);

    return true;
}

static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;
    GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
    GSList *as_same_root_mr_list;
    struct AddressSpaceInfo asi = {
        .ml_head = &ml_head,
        .owner = owner,
        .disabled = disabled,
    };

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        /* Create hashtable, key=AS root MR, value = list of AS */
        as_same_root_mr_list = g_hash_table_lookup(views, as->root);
        as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as,
                                                     address_space_compare_name);
        g_hash_table_insert(views, as->root, as_same_root_mr_list);
    }

    /* print address spaces */
    g_hash_table_foreach(views, mtree_print_as, &asi);
    g_hash_table_foreach_remove(views, mtree_info_as_free, 0);
    g_hash_table_unref(views);

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
    if (flatview) {
        mtree_info_flatview(dispatch_tree, owner);
    } else {
        mtree_info_as(dispatch_tree, owner, disabled);
    }
}
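/*
 * This is the backend of the monitor's "info mtree" command; to the best
 * of our understanding the HMP flags map roughly as: -f -> flatview,
 * -d -> dispatch_tree, -o -> owner, -D -> disabled.
 */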
bool memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_ram_nomigrate(mr, owner, name, size, errp)) {
        return false;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}
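/*
 * Typical board or device usage (a sketch; the names and addresses are
 * hypothetical):
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "my-dev.ram",
 *                            16 * MiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0x40000000,
 *                                 &s->ram);
 */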
bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
                                        Object *owner,
                                        const char *name,
                                        uint64_t size,
                                        Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_ram_flags_nomigrate(mr, owner, name, size,
                                                RAM_GUEST_MEMFD, errp)) {
        return false;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}

bool memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_rom_nomigrate(mr, owner, name, size, errp)) {
        return false;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}

bool memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;

    if (!memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                                 name, size, errp)) {
        return false;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);

    return true;
}
/*
 * Support system builds with CONFIG_FUZZ using a weak symbol and a stub for
 * the fuzz_dma_read_cb callback
 */
#ifdef CONFIG_FUZZ
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
                                            size_t len,
                                            MemoryRegion *mr)
{
}
#endif

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static const TypeInfo ram_discard_manager_info = {
    .parent             = TYPE_INTERFACE,
    .name               = TYPE_RAM_DISCARD_MANAGER,
    .class_size         = sizeof(RamDiscardManagerClass),
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
    type_register_static(&ram_discard_manager_info);
}

type_init(memory_register_types)