/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "exec/exec-all.h" /* qemu_sprint_backtrace */
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qemu-common.h" /* trace_unassigned */
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;
/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
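
/*
 * Illustrative sketch (not part of the original file; values hypothetical):
 * how the AddrRange helpers above compose.
 *
 *     AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x2000));
 *     AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x2000));
 *     if (addrrange_intersects(a, b)) {
 *         AddrRange c = addrrange_intersection(a, b);
 *         // c covers [0x2000, 0x3000): it starts at the larger of the two
 *         // starts and ends at the smaller of the two ends.
 *     }
 */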
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)
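
/*
 * Illustrative sketch (hypothetical listener, not from this file): the
 * macros above fan the notification out to callbacks such as .region_add
 * and .region_del on every registered MemoryListener, e.g.:
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         // called once per FlatRange that appears in the address space
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *     };
 *     memory_listener_register(&my_listener, &address_space_memory);
 */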
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    if (int128_eq(a->addr.start, b->addr.start) &&
        (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
         (int128_eq(a->addr.size, b->addr.size) &&
          (a->match_data == b->match_data) &&
          ((a->match_data && (a->data == b->data)) || !a->match_data) &&
          (a->e == b->e))))
        return true;

    return false;
}
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}
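
/*
 * Illustrative sketch (hypothetical numbers): a FlatRange covering guest
 * physical [0x100000, 0x101000), backed by offset 0x1000 of its terminal
 * MemoryRegion, yields a MemoryRegionSection with
 *     .offset_within_address_space = 0x100000
 *     .offset_within_region        = 0x1000
 *     .size                        = int128_make64(0x1000)
 */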
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (qatomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
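
/*
 * Illustrative sketch (hypothetical values): two FlatRanges are mergeable
 * when they are contiguous both in guest-physical space and in region
 * offset, and carry identical attributes, e.g.
 *     ranges[0]: addr [0x0000, 0x1000)  offset_in_region 0x0000
 *     ranges[1]: addr [0x1000, 0x2000)  offset_in_region 0x1000
 * flatview_simplify() collapses these into a single range
 *     addr [0x0000, 0x2000)  offset_in_region 0x0000
 */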
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
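
/*
 * Illustrative sketch (hypothetical value): for a DEVICE_LITTLE_ENDIAN
 * region accessed with a big-endian MemOp, a 4-byte datum is byte-swapped
 * here: 0x11223344 becomes 0x44332211 via bswap32() before it reaches the
 * caller (and symmetrically on the write path).
 */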
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}
static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   signed shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
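
/*
 * Illustrative sketch (hypothetical device): an 8-byte access to a region
 * whose .impl.max_access_size is 4 is split into two 4-byte access_fn()
 * calls.  On a little-endian region the fragments land at shifts 0 and 32;
 * on a big-endian region the shift order is reversed, so the fragment at
 * the lowest address still maps to the most significant bits of the wide
 * value.
 */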
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque)
{
    FlatRange *fr;

    assert(fv);
    assert(cb);

    FOR_EACH_FLAT_RANGE(fr, fv) {
        if (cb(fr->addr.start, fr->addr.size, fr->mr, opaque))
            break;
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                            ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    qatomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
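
/*
 * Illustrative sketch (hypothetical regions): batching topology changes so
 * that flat views, dispatch tables and listeners are rebuilt once, on the
 * outermost commit, rather than after every individual change:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, false);
 *     memory_region_set_address(bar_mr, new_base);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();   // single rebuild happens here
 */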
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
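
/*
 * Illustrative sketch: each reserved character ('/', '[', ']', '\\') is
 * rewritten as a four-byte "\xNN" escape, so a hypothetical region name
 * such as "pci/mem[0]" becomes "pci\x2fmem\x5b0\x5d" before it is used as
 * a QOM child property name.
 */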
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr));
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    char *path = (char *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL);
}
static int qemu_target_backtrace(target_ulong *array, size_t size)
{
    int n = 0;
    if (size >= 2) {
#if defined(TARGET_ARM)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->regs[15];
        array[1] = env->regs[14];
#elif defined(TARGET_MIPS)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->active_tc.PC;
        array[1] = env->active_tc.gpr[31];
#else
        array[0] = 0;
        array[1] = 0;
#endif
        n = 2;
    }
    return n;
}

#include "disas/disas.h"
const char *qemu_sprint_backtrace(char *buffer, size_t length)
{
    char *p = buffer;
    if (current_cpu) {
        target_ulong caller[2];
        const char *symbol;

        qemu_target_backtrace(caller, 2);
        symbol = lookup_symbol(caller[0]);
        p += sprintf(p, "[%s]", symbol);
        symbol = lookup_symbol(caller[1]);
        p += sprintf(p, "[%s]", symbol);
    } else {
        p += sprintf(p, "[cpu not running]");
    }
    assert((p - buffer) < length);
    return buffer;
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem read " TARGET_FMT_plx " %s\n",
                addr, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem write " TARGET_FMT_plx
                " = 0x%" PRIx64 " %s\n",
                addr, val, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
                                       "0x%" HWADDR_PRIX ", size %u, "
                                       "region '%s', reason: rejected\n",
                      addr, size, memory_region_name(mr));
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
                                       "0x%" HWADDR_PRIX ", size %u, "
                                       "region '%s', reason: unaligned\n",
                      addr, size, memory_region_name(mr));
        return false;
    }

    /* Treat zero as compatibility all valid */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr "
                                       "0x%" HWADDR_PRIX ", size %u, "
                                       "region '%s', reason: invalid size "
                                       "(min:%u max:%u)\n",
                      addr, size, memory_region_name(mr),
                      mr->ops->valid.min_access_size,
                      mr->ops->valid.max_access_size);
        return false;
    }
    return true;
}
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    fuzz_dma_read_cb(addr, size, mr, false);
    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
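
/*
 * Illustrative sketch (hypothetical device, not from this file): a minimal
 * MMIO region wired to callbacks through memory_region_init_io():
 *
 *     static uint64_t my_dev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         return 0;   // decode addr, return register contents
 *     }
 *     static void my_dev_write(void *opaque, hwaddr addr,
 *                              uint64_t val, unsigned size)
 *     {
 *     }
 *     static const MemoryRegionOps my_dev_ops = {
 *         .read = my_dev_read,
 *         .write = my_dev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *     ...
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                           "my-dev", 0x1000);
 */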
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
    mr->readonly = true;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            g_strdup(object_get_canonical_path_component(OBJECT(mr)));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    RAMBlock *rb = mr->ram_block;

    if (global_dirty_log && ((rb && qemu_ram_is_migratable(rb)) ||
                             memory_region_is_iommu(mr))) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
                                                   Error **errp)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        ret = imrc->notify_flag_changed(iommu_mr,
                                        iommu_mr->iommu_notify_flags,
                                        flags, errp);
    }

    if (!ret) {
        iommu_mr->iommu_notify_flags = flags;
    }
    return ret;
}

int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
                                           uint64_t page_size_mask,
                                           Error **errp)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    if (imrc->iommu_set_page_size_mask) {
        ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
    }
    return ret;
}

int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp)
{
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    if (mr->alias) {
        return memory_region_register_iommu_notifier(mr->alias, n, errp);
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
    if (ret) {
        QLIST_REMOVE(n, node);
    }
    return ret;
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;
    hwaddr entry_end = entry->iova + entry->addr_mask;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    assert(entry->iova >= notifier->start && entry_end <= notifier->end);

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_one(iommu_notifier, &entry);
        }
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
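
/*
 * Illustrative sketch (hypothetical VRAM region): a display device enables
 * fine-grained dirty tracking on its framebuffer and later checks whether
 * the client bit is active:
 *
 *     memory_region_set_log(vram_mr, true, DIRTY_MEMORY_VGA);
 *     ...
 *     if (memory_region_is_logging(vram_mr, DIRTY_MEMORY_VGA)) {
 *         // repaint only the scanlines whose pages were dirtied
 *     }
 *     memory_region_set_log(vram_mr, false, DIRTY_MEMORY_VGA);
 */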
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len)
{
    MemoryRegionSection mrs;
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;
    hwaddr sec_start, sec_end, sec_size;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_clear) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (!fr->dirty_log_mask || fr->mr != mr) {
                /*
                 * Clear dirty bitmap operation only applies to those
                 * regions whose dirty logging is at least enabled
                 */
                continue;
            }

            mrs = section_from_flat_range(fr, view);

            sec_start = MAX(mrs.offset_within_region, start);
            sec_end = mrs.offset_within_region + int128_get64(mrs.size);
            sec_end = MIN(sec_end, start + len);

            if (sec_start >= sec_end) {
                /*
                 * If this memory region section has no intersection
                 * with the requested range, skip.
                 */
                continue;
            }

            /* Valid case; shrink the section if needed */
            mrs.offset_within_address_space +=
                sec_start - mrs.offset_within_region;
            mrs.offset_within_region = sec_start;
            sec_size = sec_end - sec_start;
            mrs.size = int128_make64(sec_size);
            listener->log_clear(listener, &mrs);
        }
        flatview_unref(view);
    }
}
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    DirtyBitmapSnapshot *snapshot;
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    memory_global_after_dirty_log_sync();
    return snapshot;
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    if (mr->ram_block) {
        qemu_ram_msync(mr->ram_block, addr, size);
    }
}

void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * Might be extended case needed to cover
     * different types of memory regions
     */
    if (mr->dirty_log_mask) {
        memory_region_msync(mr, addr, size);
    }
}
/*
 * Call proper memory listeners about the change on the newly
 * added/removed CoalescedMemoryRange.
 */
static void memory_region_update_coalesced_range(MemoryRegion *mr,
                                                 CoalescedMemoryRange *cmr,
                                                 bool add)
{
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                flat_range_coalesced_io_notify(fr, as, cmr, add);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr, cmr, true);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        memory_region_update_coalesced_range(mr, cmr, false);
        g_free(cmr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

static bool userspace_eventfd_warning;
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
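
/*
 * Illustrative sketch (hypothetical doorbell register): make a write of the
 * value 1 to a 4-byte MMIO cell signal an EventNotifier directly, without a
 * round trip through the device model's write callback:
 *
 *     EventNotifier notifier;
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(mmio_mr, 0x40, 4, true, 1, &notifier);
 *     ...
 *     memory_region_del_eventfd(mmio_mr, 0x40, 4, true, 1, &notifier);
 */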
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
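
/*
 * Illustrative sketch (hypothetical layout): composing a container from
 * subregions; the overlap variant lets a higher-priority region shadow a
 * lower-priority one where they intersect:
 *
 *     memory_region_add_subregion(sysmem, 0x00000000, ram_mr);
 *     memory_region_add_subregion_overlap(sysmem, 0xfffc0000, rom_mr, 1);
 *     // rom_mr (priority 1) wins over ram_mr (priority 0) wherever the
 *     // two regions overlap.
 */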

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
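
/*
 * Example (illustrative sketch): toggling several regions atomically.
 * Because the transaction depth counter nests, batching the updates
 * renders the FlatView once instead of once per call.  MyDeviceState
 * and the bar fields are hypothetical.
 */
#if 0
static void mydev_update_decode(MyDeviceState *s, bool decode)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(&s->bar0, decode);
    memory_region_set_enabled(&s->bar1, decode);
    memory_region_transaction_commit();
}
#endif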

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region. It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    RCU_READ_LOCK_GUARD();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    return ret;
}
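
/*
 * Example (illustrative sketch): memory_region_find() returns a
 * referenced section, so callers must drop the reference when done.
 * range_is_ram() is a hypothetical helper.
 */
#if 0
static bool range_is_ram(MemoryRegion *root, hwaddr addr, uint64_t len)
{
    MemoryRegionSection sec = memory_region_find(root, addr, len);
    bool ret = sec.mr && memory_region_is_ram(sec.mr);

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return ret;
}
#endif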

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    RCU_READ_LOCK_GUARD();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}

void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
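
/*
 * Example (illustrative sketch): the dirty-log life cycle as driven by
 * a migration-style client.  A real client also registers a
 * MemoryListener with log_start/log_sync-type callbacks.
 */
#if 0
static void dirty_log_pass(void)
{
    memory_global_dirty_log_start();    /* listeners get log_global_start */
    /* ... let the guest run, then collect what it touched ... */
    memory_global_dirty_log_sync();     /* pull bitmaps from accelerators */
    memory_global_after_dirty_log_sync();
    memory_global_dirty_log_stop();     /* deferred while the VM is stopped */
}
#endif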

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();
    }

    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
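
/*
 * Example (illustrative sketch): a minimal listener.  region_add and
 * priority are real MemoryListener fields; the logging body is
 * hypothetical.  On registration the listener is immediately replayed
 * against the current FlatView of the address space.
 */
#if 0
static void my_region_add(MemoryListener *l, MemoryRegionSection *sec)
{
    qemu_printf("mapped %s\n", memory_region_name(sec->mr));
}

static MemoryListener my_listener = {
    .region_add = my_region_add,
    .priority = 10,     /* ordering within the global and per-AS lists */
};

/* memory_listener_register(&my_listener, &address_space_memory); */
#endif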

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
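
/*
 * Example (illustrative sketch, hypothetical device): a per-device DMA
 * view.  The root may be an alias of system memory, an IOMMU region,
 * or any other MemoryRegion.
 */
#if 0
static void mydev_init_dma(MyDeviceState *s, MemoryRegion *root)
{
    address_space_init(&s->dma_as, root, "mydev-dma");
    /* ... issue DMA through &s->dma_as ... */
    /* address_space_destroy(&s->dma_as) on unrealize. */
}
#endif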

static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use. Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_type(mr->alias);
    }
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                       int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "
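
/*
 * Worked example: MR_SIZE() yields the *inclusive* end offset, so a
 * 0x1000-byte region starting at 0x100 prints as 0x100-0x10ff, while a
 * zero-size region degenerates to 0 rather than wrapping around.
 */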

static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        char *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}

static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner, bool display_disabled)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                        "-" TARGET_FMT_plx "%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        memory_region_name(mr->alias),
                        mr->alias_offset,
                        mr->alias_offset + MR_SIZE(mr->size),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    } else {
        if (mr->enabled || display_disabled) {
            for (i = 0; i < level; i++) {
                qemu_printf(MTREE_INDENT);
            }
            qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s%s",
                        cur_start, cur_end,
                        mr->priority,
                        mr->nonvolatile ? "nv-" : "",
                        memory_region_type((MemoryRegion *)mr),
                        memory_region_name(mr),
                        mr->enabled ? "" : " [disabled]");
            if (owner) {
                mtree_print_mr_owner(mr);
            }
            qemu_printf("\n");
        }
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner, display_disabled);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace *, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace *, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac->name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
        AccelClass *ac = ACCEL_GET_CLASS(current_accel());

        if (ac->has_memory) {
            fvi.ac = ac;
        }

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
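
/*
 * Example (illustrative sketch of the output shape only; addresses,
 * names and priorities vary by machine):
 *
 *   address-space: memory
 *     0000000000000000-ffffffffffffffff (prio 0, i/o): system
 *       0000000000000000-0000000007ffffff (prio 0, ram): pc.ram
 *
 * With flatview=true, one "FlatView #n" block is printed per distinct
 * rendered view, listing the address spaces that share it.
 */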

void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
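
/*
 * Example (illustrative sketch, hypothetical device): the owner passed
 * here must be a DeviceState (or NULL) so vmstate_register_ram() can
 * derive a stable migration name.  MYDEV and the field names are
 * assumptions.
 */
#if 0
static void mydev_realize(DeviceState *dev, Error **errp)
{
    MyDeviceState *s = MYDEV(dev);

    memory_region_init_ram(&s->buf, OBJECT(dev), "mydev.buf",
                           0x4000, errp);
}
#endif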

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

/*
 * Support softmmu builds with CONFIG_FUZZ using a weak symbol and a stub for
 * the fuzz_dma_read_cb callback
 */
#ifdef CONFIG_FUZZ
void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
                                            size_t len,
                                            MemoryRegion *mr,
                                            bool is_write)
{
}
#endif

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)