/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);
typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
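/*
 * Worked example (illustrative only): with r1 = [0x1000, +0x2000) and
 * r2 = [0x2000, +0x2000), addrrange_intersects() is true because r2.start
 * lies inside r1, and addrrange_intersection() yields
 * [max(0x1000, 0x2000), min(0x3000, 0x4000)) = [0x2000, +0x1000).
 * Callers are expected to check addrrange_intersects() first; a
 * non-overlapping pair would produce a negative (Int128) size here.
 */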
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
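/*
 * memory_region_ioeventfd_before() defines a strict weak ordering over
 * (addr.start, addr.size, match_data, data, e).  That ordering is what
 * keeps mr->ioeventfds and as->ioeventfds sorted, so that
 * address_space_add_del_ioeventfds() below can compute a symmetric
 * difference with a single linear merge pass, and it lets
 * memory_region_ioeventfd_equal() be expressed as "neither sorts before
 * the other".
 */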
typedef struct FlatRange FlatRange;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(void)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;

    return view;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        call_rcu(view, flatview_destroy, rcu);
    }
}

FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
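/*
 * Example (illustrative only): two FlatRanges produced from one RAM region,
 * [0x0, +0x1000) at offset_in_region 0 and [0x1000, +0x1000) at
 * offset_in_region 0x1000, satisfy can_merge() and collapse into a single
 * [0x0, +0x2000) range.  A hole between them, or a difference in
 * dirty_log_mask, romd_mode or readonly, stops the merge.
 */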
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   unsigned shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
*memory_region_to_address_space(MemoryRegion
*mr
)
628 while (mr
->container
) {
631 QTAILQ_FOREACH(as
, &address_spaces
, address_spaces_link
) {
632 if (mr
== as
->root
) {
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = flatview_new();

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}
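/*
 * Example (illustrative only): if a 4KiB MMIO subregion at priority 1
 * overlaps a RAM subregion at priority 0, render_memory_region() visits
 * it first (subregions are kept sorted by descending priority), so the
 * RAM region only fills the remaining gaps.  The result returned by
 * generate_memory_topology() is therefore a sorted list of disjoint
 * FlatRanges in which the higher-priority region obscures the
 * lower-priority one.
 */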
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);
    int i;

    new_view->dispatch = address_space_dispatch_new(new_view);
    for (i = 0; i < new_view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&new_view->ranges[i], new_view);
        flatview_add_to_dispatch(new_view, &mrs);
    }
    address_space_dispatch_compact(new_view->dispatch);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        address_space_update_topology_pass(as, old_view, new_view, false);
        address_space_update_topology_pass(as, old_view, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    flatview_unref(old_view);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
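/*
 * Usage sketch (illustrative only, mr_a/mr_b/sysmem are hypothetical
 * names): callers batch topology changes so that listeners see a single
 * update, e.g.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);
 *     memory_region_add_subregion(sysmem, 0x10000000, mr_b);
 *     memory_region_transaction_commit();
 *
 * Nested begin/commit pairs are allowed; the flat views are only
 * regenerated when the outermost commit brings the depth back to zero.
 */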
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
       return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
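/*
 * Example (illustrative only): characters that are meaningful in QOM
 * paths are escaped as "\xNN", so a region named "pci/mem" becomes the
 * child property "pci\x2fmem[*]" under its owner; names without such
 * characters are simply duplicated.
 */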
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}
memory_region_dispatch_read1(MemoryRegion
*mr
,
1290 if (mr
->ops
->read
) {
1291 return access_with_adjusted_size(addr
, pval
, size
,
1292 mr
->ops
->impl
.min_access_size
,
1293 mr
->ops
->impl
.max_access_size
,
1294 memory_region_read_accessor
,
1296 } else if (mr
->ops
->read_with_attrs
) {
1297 return access_with_adjusted_size(addr
, pval
, size
,
1298 mr
->ops
->impl
.min_access_size
,
1299 mr
->ops
->impl
.max_access_size
,
1300 memory_region_read_with_attrs_accessor
,
1303 return access_with_adjusted_size(addr
, pval
, size
, 1, 4,
1304 memory_region_oldmmio_read_accessor
,
1309 MemTxResult
memory_region_dispatch_read(MemoryRegion
*mr
,
1317 if (!memory_region_access_valid(mr
, addr
, size
, false)) {
1318 *pval
= unassigned_mem_read(mr
, addr
, size
);
1319 return MEMTX_DECODE_ERROR
;
1322 r
= memory_region_dispatch_read1(mr
, addr
, pval
, size
, attrs
);
1323 adjust_endianness(mr
, pval
, size
);
1327 /* Return true if an eventfd was signalled */
1328 static bool memory_region_dispatch_write_eventfds(MemoryRegion
*mr
,
1334 MemoryRegionIoeventfd ioeventfd
= {
1335 .addr
= addrrange_make(int128_make64(addr
), int128_make64(size
)),
1340 for (i
= 0; i
< mr
->ioeventfd_nb
; i
++) {
1341 ioeventfd
.match_data
= mr
->ioeventfds
[i
].match_data
;
1342 ioeventfd
.e
= mr
->ioeventfds
[i
].e
;
1344 if (memory_region_ioeventfd_equal(ioeventfd
, mr
->ioeventfds
[i
])) {
1345 event_notifier_set(ioeventfd
.e
);
1353 MemTxResult
memory_region_dispatch_write(MemoryRegion
*mr
,
1359 if (!memory_region_access_valid(mr
, addr
, size
, true)) {
1360 unassigned_mem_write(mr
, addr
, data
, size
);
1361 return MEMTX_DECODE_ERROR
;
1364 adjust_endianness(mr
, &data
, size
);
1366 if ((!kvm_eventfds_enabled()) &&
1367 memory_region_dispatch_write_eventfds(mr
, addr
, data
, size
, attrs
)) {
1371 if (mr
->ops
->write
) {
1372 return access_with_adjusted_size(addr
, &data
, size
,
1373 mr
->ops
->impl
.min_access_size
,
1374 mr
->ops
->impl
.max_access_size
,
1375 memory_region_write_accessor
, mr
,
1377 } else if (mr
->ops
->write_with_attrs
) {
1379 access_with_adjusted_size(addr
, &data
, size
,
1380 mr
->ops
->impl
.min_access_size
,
1381 mr
->ops
->impl
.max_access_size
,
1382 memory_region_write_with_attrs_accessor
,
1385 return access_with_adjusted_size(addr
, &data
, size
, 1, 4,
1386 memory_region_oldmmio_write_accessor
,
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
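/*
 * Usage sketch (illustrative only, the "mydev" names are hypothetical):
 * a device typically creates its MMIO window with
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *
 * and then maps it into a container with memory_region_add_subregion()
 * or via the bus-specific helpers.
 */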
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
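/*
 * Example (illustrative only, names are hypothetical): aliases let one
 * backing region appear at several guest addresses, e.g. exposing the
 * first megabyte of RAM through a second window:
 *
 *     memory_region_init_alias(&lowmem_alias, NULL, "ram-below-1m", ram,
 *                              0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0x0, &lowmem_alias);
 *
 * Only render_memory_region() follows mr->alias/mr->alias_offset; the
 * alias itself never terminates an access.
 */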
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}
uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask + 1 ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        memory_region_notify_one(iommu_notifier, &entry);
    }
}
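/*
 * Usage sketch (illustrative only): an IOMMU implementation calls
 * memory_region_notify_iommu() whenever a guest mapping changes, e.g.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(hwaddr)0xfff,
 *         .translated_addr = pa & ~(hwaddr)0xfff,
 *         .addr_mask = 0xfff,
 *         .perm = IOMMU_RW,        /* IOMMU_NONE for an unmap */
 *     };
 *     memory_region_notify_iommu(iommu_mr, entry);
 *
 * memory_region_notify_one() then filters by each notifier's
 * [start, end] range and its MAP/UNMAP flags.
 */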
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .fv = view,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
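/*
 * Usage sketch (illustrative only, DOORBELL_OFFSET and vq_index are
 * hypothetical): virtio-style doorbells register an eventfd so that a
 * matching guest write can bypass the MMIO dispatch path:
 *
 *     memory_region_add_eventfd(mr, DOORBELL_OFFSET, 2, true, vq_index, e);
 *
 * With KVM the fd is bound in the kernel (kvm_eventfds_enabled());
 * without it, memory_region_dispatch_write_eventfds() signals the
 * notifier from the userspace write path above.
 */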
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
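/*
 * Example (illustrative only): machines routinely map overlapping windows
 * (say, a PCI hole and RAM) into the same container; giving one of them a
 * higher priority with memory_region_add_subregion_overlap() decides
 * which one wins where they overlap, as rendered by
 * render_memory_region() above.  memory_region_add_subregion() is the
 * common non-overlapping case and is simply priority 0.
 */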
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
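/*
 * Usage sketch (illustrative only): translate a guest-physical window
 * into the terminating region and the offset inside it:
 *
 *     MemoryRegionSection sec = memory_region_find(get_system_memory(),
 *                                                  0x100000, 4096);
 *     if (sec.mr) {
 *         // use sec.offset_within_region, int128_get64(sec.size), ...
 *         memory_region_unref(sec.mr);  // drop the reference taken above
 *     }
 */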
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}
void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);

                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .fv = view,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

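/*
 * Both listener lists are kept sorted by ascending priority.
 * Registration sketch (illustrative only; my_listener and its callback
 * are placeholders, not part of this file):
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         ... react to the newly visible section ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */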
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

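/*
 * Ask the owner of mr for a direct host pointer covering addr.  When
 * the ops->request_ptr callback supplies one, a transient
 * "mmio_interface" device is realized that maps the host memory as a
 * read-only subregion of mr, so subsequent reads can bypass the MMIO
 * callbacks until memory_region_invalidate_mmio_ptr() tears it down.
 */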
bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr callback calls
     * memory_region_invalidate_mmio_ptr, which seems likely when we use
     * a cache.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();

    return true;
}

typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find() added a ref on section.mr */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface, just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

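/*
 * Invalidate a pointer previously handed out via request_ptr.  An
 * invalidate record is claimed from a small fixed pool with
 * atomic_cmpxchg() (falling back to a heap allocation when the pool is
 * exhausted), and the actual teardown is deferred to
 * async_safe_run_on_cpu() rather than running in the caller's context.
 */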
void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}

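/*
 * Usage sketch (illustrative only; my_as and my_root_mr are
 * placeholders): an embedded AddressSpace is initialized against a
 * root MemoryRegion and torn down again with address_space_destroy():
 *
 *     static AddressSpace my_as;
 *
 *     address_space_init(&my_as, my_root_mr, "my-as");
 *     ...
 *     address_space_destroy(&my_as);
 */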
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = flatview_new();
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}

static void do_address_space_destroy(AddressSpace *as)
{
    bool do_free = as->malloced;

    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}

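/*
 * Shareable variant of address_space_init(): if a malloc'ed
 * AddressSpace with the same root already exists, bump its reference
 * count and return it instead of creating a duplicate.  The matching
 * release is address_space_destroy(), which only frees the
 * AddressSpace once ref_count drops to zero.
 */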
AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                       int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of a memory region.  This should never
     * happen normally.  When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

static void mtree_print_flatview(fprintf_function p, void *f,
                                 AddressSpace *as)
{
    FlatView *view = address_space_get_flatview(as);
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView for "
          "address space '%s'\n", as->name);
        flatview_unref(view);
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

    flatview_unref(view);
}

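/*
 * Entry point for the HMP "info mtree" command.  With flatview=false
 * this prints the memory region tree of every address space, followed
 * by any aliased regions collected along the way; with flatview=true
 * it prints each address space's rendered FlatView instead.  The
 * output looks roughly like (illustrative):
 *
 *     address-space: memory
 *       0000000000000000-ffffffffffffffff (prio 0, i/o): system
 *         0000000000000000-0000000007ffffff (prio 0, ram): ram
 */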
void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            mon_printf(f, "address-space (flat view): %s\n", as->name);
            mtree_print_flatview(mon_printf, f, as);
            mon_printf(f, "\n");
        }
        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

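/*
 * The _nomigrate initializers create the region without registering it
 * for migration; the wrappers below additionally call
 * vmstate_register_ram() using the owner to build a unique name.
 * Typical call (illustrative; "dev" stands in for the owning device):
 *
 *     memory_region_init_ram(mr, OBJECT(dev), "mydev.ram", 0x1000,
 *                            &error_fatal);
 */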
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)