/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access)(MemoryRegion *mr,
                                                                   hwaddr addr,
                                                                   uint64_t *value,
                                                                   unsigned size,
                                                                   unsigned shift,
                                                                   uint64_t mask,
                                                                   MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

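/*
 * Illustration (not part of the original file, values assumed): with a
 * region whose implementation access size is capped at 4 bytes, a 64-bit
 * little-endian access through access_with_adjusted_size() decomposes
 * into two 4-byte callback invocations:
 *
 *     access(mr, addr + 0, &val, 4, 0,  0xffffffffULL, attrs);
 *     access(mr, addr + 4, &val, 4, 32, 0xffffffffULL, attrs);
 *
 * so the low word lands in bits 0..31 of *value and the high word in
 * bits 32..63.  On a big-endian region the shifts run in the opposite
 * order, per the loop above.
 */
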
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

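/*
 * Usage sketch (assumed caller, not from this file): batching several
 * layout changes under one transaction, so listeners see a single
 * begin/commit pair and the flat views are rebuilt only once:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);   // mr_a/mr_b hypothetical
 *     memory_region_set_address(mr_b, 0x4000);
 *     memory_region_transaction_commit();       // one topology update here
 */
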
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const gchar *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

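/*
 * Usage sketch (hypothetical device "foo", not from this file): a device
 * model supplies its MemoryRegionOps and registers an MMIO window with
 * memory_region_init_io(); foo_mmio_read/foo_mmio_write and the state
 * struct "s" are assumptions for illustration:
 *
 *     static const MemoryRegionOps foo_mmio_ops = {
 *         .read = foo_mmio_read,
 *         .write = foo_mmio_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &foo_mmio_ops, s,
 *                           "foo-mmio", 0x1000);
 */
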
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

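/*
 * Usage sketch (assumed board code, not from this file): allocating a RAM
 * region and mapping it at guest address 0 in the system memory space:
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "board.ram", ram_size, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */
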
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

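/*
 * Usage sketch (assumed board code, hypothetical addresses): exposing the
 * low 1MB of an existing RAM region a second time at another guest
 * address.  The alias has no storage of its own; accesses are forwarded
 * to @orig starting at @offset:
 *
 *     memory_region_init_alias(alias_mr, NULL, "ram-low-alias",
 *                              ram_mr, 0, 0x100000);
 *     memory_region_add_subregion(get_system_memory(), 0xe0000000, alias_mr);
 */
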
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops,
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&mr->iommu_notify);
    mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != mr->iommu_notify_flags &&
        mr->iommu_ops->notify_flag_changed) {
        mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
                                           flags);
    }

    mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(mr);
}

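/*
 * Usage sketch (hypothetical client such as a vhost/VFIO-style backend,
 * not from this file): fill in an IOMMUNotifier with iommu_notifier_init()
 * and register it for MAP/UNMAP events over the whole IOVA range;
 * my_iommu_notify and "n" are assumptions for illustration:
 *
 *     static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *     {
 *         // react to the mapping change described by iotlb
 *     }
 *
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX);
 *     memory_region_register_iommu_notifier(iommu_mr, &n);
 */
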
uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
{
    assert(memory_region_is_iommu(mr));
    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
        return mr->iommu_ops->get_min_page_size(mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
{
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (mr->iommu_ops->replay) {
        mr->iommu_ops->replay(mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(MemoryRegion *mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, mr) {
        memory_region_iommu_replay(mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    memory_region_update_iommu_notify_flags(mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask + 1 ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(mr));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, mr) {
        memory_region_notify_one(iommu_notifier, &entry);
    }
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MemoryRegionSection mrs = section_from_flat_range(fr, as);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

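/*
 * Usage sketch (virtio-style doorbell, hypothetical values): after this
 * call, a 2-byte guest write of the value 0 at offset 0x10 inside @mr
 * signals @notifier directly (via KVM's ioeventfd when available) instead
 * of taking the slow MMIO exit into the device model; vq_notifier is an
 * assumed, already-initialized EventNotifier:
 *
 *     memory_region_add_eventfd(mr, 0x10, 2, true, 0, &vq_notifier);
 */
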
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

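/*
 * Usage sketch (assumed chipset model, hypothetical addresses):
 * overlapping subregions are resolved by priority during rendering; here
 * a 128KB ROM window shadows the RAM that the plain (priority 0)
 * memory_region_add_subregion() call placed underneath it:
 *
 *     memory_region_add_subregion(sysmem, 0, ram);            // prio 0
 *     memory_region_add_subregion_overlap(sysmem, 0xe0000,
 *                                         rom_window, 1);     // wins
 */
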
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask) {
                MemoryRegionSection mrs = section_from_flat_range(fr, as);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

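/*
 * Usage sketch (hypothetical listener, not from this file): a client that
 * wants to track an address space's flat view fills in only the callbacks
 * it cares about and registers; region_add fires immediately for every
 * existing FlatRange via listener_add_address_space() above.
 * my_region_add/my_region_del are assumed callbacks:
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */
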
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr call
     * memory_region_invalidate_mmio_ptr which seems to be likely when we use
     * this function.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}

2472 typedef struct MMIOPtrInvalidate
{
2478 } MMIOPtrInvalidate
;
2480 #define MAX_MMIO_INVALIDATE 10
2481 static MMIOPtrInvalidate mmio_ptr_invalidate_list
[MAX_MMIO_INVALIDATE
];
static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find() added a ref on section.mr */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface; just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}
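
/*
 * Schedule invalidation of a pointer previously handed out through
 * memory_region_request_mmio_ptr().  The actual teardown is deferred to
 * an async safe work item on first_cpu.
 */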
void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}
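
/*
 * RCU callback for address_space_destroy(); by the time this runs, no RCU
 * reader can still see the old dispatch table or FlatView.
 */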
static void do_address_space_destroy(AddressSpace *as)
{
    bool do_free = as->malloced;

    address_space_destroy_dispatch(as);
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}
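
/*
 * Return an existing malloc'ed AddressSpace with the same root, bumping
 * its reference count, or allocate and initialize a fresh one.  A sketch
 * of the intended pairing (caller-side names are illustrative):
 *
 *     AddressSpace *as = address_space_init_shareable(root, "cpu-memory");
 *     ...
 *     address_space_destroy(as);   // drops the reference taken above
 */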
AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}
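
/*
 * Drop one reference; only the last reference dismantles the address
 * space.  The root is detached inside a transaction so listeners see the
 * removal, and the final teardown is deferred to RCU.
 */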
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_unregister(as);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
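
/* Human-readable region type tag, as shown in the "info mtree" output. */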
static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}
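
/* Bookkeeping for the "info mtree" dump: a queue of regions to print. */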
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)

#define MTREE_INDENT "  "
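
/*
 * Recursively print @mr and its subregions, indented by nesting level and
 * sorted by address and priority.  Alias targets are collected on
 * @alias_print_queue so that each one is dumped exactly once afterwards.
 */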
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of the memory region.  This should never
     * happen normally; when it does, dump a warning for the user who is
     * observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}
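
/* Render the flattened view of one address space, one line per FlatRange. */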
static void mtree_print_flatview(fprintf_function p, void *f,
                                 AddressSpace *as)
{
    FlatView *view = address_space_get_flatview(as);
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView for "
          "address space '%s'\n", as->name);
        flatview_unref(view);
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

    flatview_unref(view);
}
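
/*
 * Monitor back end for "info mtree": dump either the rendered flat views
 * or the memory region trees of all address spaces, followed by any
 * aliased regions collected along the way.
 */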
void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            mon_printf(f, "address-space (flat view): %s\n", as->name);
            mtree_print_flatview(mon_printf, f, as);
            mon_printf(f, "\n");
        }
        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}
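
/* QOM boilerplate: register TYPE_MEMORY_REGION with the type system. */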
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)