/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h" /* qemu_sprint_backtrace */
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;
/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
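
/*
 * Worked example (illustrative, not in the original source): for
 * r1 = [0x1000, 0x3000) and r2 = [0x2000, 0x4000), addrrange_intersection()
 * computes start = max(0x1000, 0x2000) = 0x2000 and
 * end = min(0x3000, 0x4000) = 0x3000, yielding [0x2000, 0x3000) of size
 * 0x1000.  Callers are expected to check addrrange_intersects() first;
 * on disjoint inputs the computed size would be negative.
 */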
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
typedef struct FlatRange FlatRange;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}
static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}
static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}
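
/*
 * Usage sketch (illustrative, not part of the original file): readers that
 * only hold the RCU read lock must take a reference before using a view,
 * because a concurrent commit may swap as->current_map at any time:
 *
 *     rcu_read_lock();
 *     view = address_space_get_flatview(as);   // ref'd copy of current map
 *     ... walk view->ranges ...
 *     flatview_unref(view);
 *     rcu_read_unlock();
 *
 * flatview_ref() fails (returns false) once the count has dropped to zero,
 * which is why address_space_get_flatview() below retries in a loop.
 */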
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}
static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
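
/*
 * Worked example (illustrative): an 8-byte guest access to a device whose
 * ops declare max_access_size = 4 is split into two 4-byte calls.  On a
 * little-endian target the first call covers bits 0..31 (shift 0) and the
 * second bits 32..63 (shift 32); on a big-endian target the shifts are
 * reversed, so each partial result still lands in the right half of *value.
 */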
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
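
/*
 * Worked example (illustrative): a 64KiB terminating region placed at
 * offset 0x8000 under a container mapped at 0x10000 renders as the absolute
 * range [0x18000, 0x28000).  If an earlier, higher-priority sibling already
 * occupies [0x1c000, 0x20000) in @view, only the gaps [0x18000, 0x1c000)
 * and [0x20000, 0x28000) are inserted, with fr.offset_in_region advanced
 * past the obscured middle.
 */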
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
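
/*
 * Worked example (illustrative): if the old view is {A:[0,4K), B:[4K,8K)}
 * and the new view is {A:[0,4K), C:[4K,8K)}, the two passes produce
 * region_del(B) on the !adding pass (walked in Reverse order) and
 * region_add(C) on the adding pass (Forward order), while A only gets
 * region_nop plus any log_start/log_stop implied by its dirty_log_mask.
 */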
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}
static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}
*as
)
1016 FlatView
*old_view
= address_space_to_flatview(as
);
1017 MemoryRegion
*physmr
= memory_region_get_flatview_root(as
->root
);
1018 FlatView
*new_view
= g_hash_table_lookup(flat_views
, physmr
);
1022 if (old_view
== new_view
) {
1027 flatview_ref(old_view
);
1030 flatview_ref(new_view
);
1032 if (!QTAILQ_EMPTY(&as
->listeners
)) {
1033 FlatView tmpview
= { .nr
= 0 }, *old_view2
= old_view
;
1036 old_view2
= &tmpview
;
1038 address_space_update_topology_pass(as
, old_view2
, new_view
, false);
1039 address_space_update_topology_pass(as
, old_view2
, new_view
, true);
1042 /* Writes are protected by the BQL. */
1043 atomic_rcu_set(&as
->current_map
, new_view
);
1045 flatview_unref(old_view
);
1048 /* Note that all the old MemoryRegions are still alive up to this
1049 * point. This relieves most MemoryListeners from the need to
1050 * ref/unref the MemoryRegions they get---unless they use them
1051 * outside the iothread mutex, in which case precise reference
1052 * counting is necessary.
1055 flatview_unref(old_view
);
static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
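
/*
 * Usage sketch (illustrative): batching several layout changes into one
 * transaction makes listeners see a single begin/commit pair instead of
 * one topology rebuild per change:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, 0x40000000);
 *     memory_region_transaction_commit();   // one rebuild here
 */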
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
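
/*
 * Example (illustrative): a region named "pci/bar[0]" escapes to
 * "pci\x2fbar\x5b0\x5d", so characters that are special in QOM path
 * components ('/', '[', ']', '\\') never appear literally in the child
 * property name generated below.
 */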
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}
static int qemu_target_backtrace(target_ulong *array, size_t size)
{
    int n = 0;
    if (size >= 2) {
#if defined(TARGET_ARM)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->regs[15];
        array[1] = env->regs[14];
        n = 2;
#elif defined(TARGET_MIPS)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->active_tc.PC;
        array[1] = env->active_tc.gpr[31];
        n = 2;
#endif
    }
    return n;
}

#include "disas/disas.h"
const char *qemu_sprint_backtrace(char *buffer, size_t length)
{
    char *p = buffer;
    if (current_cpu) {
        target_ulong caller[2];
        const char *symbol;

        qemu_target_backtrace(caller, 2);
        symbol = lookup_symbol(caller[0]);
        p += sprintf(p, "[%s]", symbol);
        symbol = lookup_symbol(caller[1]);
        p += sprintf(p, "[%s]", symbol);
    } else {
        p += sprintf(p, "[cpu not running]");
    }
    assert((p - buffer) < length);
    return buffer;
}
static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}
#ifdef DEBUG_UNASSIGNED
static const bool trace_unassigned = true;
#else
static const bool trace_unassigned = false;
#endif

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem read " TARGET_FMT_plx " %s\n",
                addr, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem write " TARGET_FMT_plx
                " = 0x%" PRIx64 " %s\n",
                addr, val, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        fprintf(stderr, "Misaligned i/o to address %08" HWADDR_PRIx
                " with size %u for memory region %s\n",
                addr, size, mr->name);
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
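
/*
 * Usage sketch (illustrative; "s", "my_mmio_ops" and the addresses are
 * hypothetical): a device model registers a 4KiB MMIO window and maps it
 * into the system memory tree:
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_mmio_ops, s,
 *                           "my-dev-mmio", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000,
 *                                 &s->iomem);
 *
 * Accesses that land in the window are dispatched through
 * memory_region_dispatch_read/write() above, split up according to
 * my_mmio_ops.impl.{min,max}_access_size.
 */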
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}
uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}
bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        memory_region_notify_one(iommu_notifier, &entry);
    }
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .fv = view,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}
void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
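
/*
 * Usage sketch (illustrative; NOTIFY_OFFSET and queue_index are
 * hypothetical): virtio-style devices register a "kick" doorbell so a
 * guest write to the notify register wakes an EventNotifier directly,
 * without a full return to the main loop:
 *
 *     event_notifier_init(&s->kick, 0);
 *     memory_region_add_eventfd(&s->iomem, NOTIFY_OFFSET, 2,
 *                               true, queue_index, &s->kick);
 *
 * With KVM and kvm_eventfds_enabled(), the kernel completes the write;
 * otherwise memory_region_dispatch_write_eventfds() above emulates it.
 */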
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
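
/*
 * Usage sketch (illustrative): a boot ROM that shadows the low RAM window
 * can be layered on top of it with a higher priority; render_memory_region()
 * walks subregions in priority order, so the ROM obscures the RAM until it
 * is disabled:
 *
 *     memory_region_add_subregion(sysmem, 0x0, ram);                 // prio 0
 *     memory_region_add_subregion_overlap(sysmem, 0x0, boot_rom, 1); // wins
 *     ...
 *     memory_region_set_enabled(boot_rom, false);   // RAM shows through
 */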
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}
bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;

    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}

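/*
 * Usage sketch (hypothetical): memory_region_find() returns with a
 * reference held on section.mr, so callers must drop it when done:
 *
 *     MemoryRegionSection section = memory_region_find(mr, addr, 4);
 *     if (section.mr) {
 *         // ... inspect section.offset_within_region, section.size ...
 *         memory_region_unref(section.mr);
 *     }
 */
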
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);

                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();
    }

    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}

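/*
 * Lifecycle sketch (hypothetical caller, e.g. RAM migration code; not
 * part of this file): the global dirty log is started once, synced
 * repeatedly while iterating, and stopped at the end.  The stop may be
 * deferred via the vm-change-state handler above if the VM is not
 * currently running.
 *
 *     memory_global_dirty_log_start();
 *     while (more_dirty_pages) {
 *         memory_global_dirty_log_sync();
 *         // ... transmit pages marked dirty in the RAMBlock bitmaps ...
 *     }
 *     memory_global_dirty_log_stop();
 */
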
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

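/*
 * Usage sketch (hypothetical, not part of this file): a minimal
 * MemoryListener that logs every section of an address space's flat
 * view as it is (re)rendered.  Registration immediately replays the
 * current flat view through region_add, so the listener needs no
 * separate initial scan.
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         fprintf(stderr, "mapped %s\n", memory_region_name(s->mr));
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */
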
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr callback calls
     * memory_region_invalidate_mmio_ptr, which seems likely when we use
     * this function.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}

typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find adds a ref on section.mr */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface; just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

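/*
 * Usage sketch (hypothetical, not part of this file): a device that
 * gives its DMA engine a private view of memory builds a root region
 * and wraps it in an address space.  dev and sysmem_alias are assumed
 * caller-provided.
 *
 *     memory_region_init(&dev->dma_root, OBJECT(dev), "mydev-dma",
 *                        UINT64_MAX);
 *     memory_region_add_subregion(&dev->dma_root, 0, sysmem_alias);
 *     address_space_init(&dev->dma_as, &dev->dma_root, "mydev-dma");
 */
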
static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use. Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    fprintf_function mon_printf;
    void *f;
    int counter;
    bool dispatch_tree;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    fprintf_function p = fvi->mon_printf;
    void *f = fvi->f;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    p(f, "FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace *, i);
        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
        if (as->root->alias) {
            p(f, ", alias %s", memory_region_name(as->root->alias));
        }
        p(f, "\n");
    }

    p(f, " Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(p, f, view->dispatch, view->root);
    }
#endif

    p(f, "\n");
}

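/*
 * Illustrative output shape (abridged, values hypothetical) as printed
 * by the code above for "info mtree -f":
 *
 *     FlatView #0
 *      AS "memory", root: system
 *      Root memory region: system
 *       0000000000000000-0000000007ffffff (prio 0, ram): pc.ram
 *       00000000fffc0000-00000000ffffffff (prio 0, rom): pc.bios
 */
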
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
                bool dispatch_tree)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .mon_printf = mon_printf,
            .f = f,
            .counter = 0,
            .dispatch_tree = dispatch_tree
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

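/*
 * Usage sketch (hypothetical device, not part of this file): in a
 * DeviceState realize method, the owner gives the RAM block a unique
 * migration name, and this non-_nomigrate variant registers it for
 * migration automatically.  MyDevState and MYDEV are assumed
 * caller-defined.
 *
 *     static void mydev_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyDevState *s = MYDEV(dev);
 *
 *         memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                                0x4000, errp);
 *     }
 */
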
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)