/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "exec/exec-all.h" /* qemu_sprint_backtrace */
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);
typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
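/*
 * Worked example (illustrative only, not part of the original file): how
 * the helpers above combine to clip one range against another, which is
 * exactly the pattern render_memory_region() uses below.  Values are
 * arbitrary:
 *
 *     AddrRange a = addrrange_make(int128_make64(0x1000),
 *                                  int128_make64(0x2000));  // [0x1000,0x3000)
 *     AddrRange b = addrrange_make(int128_make64(0x2000),
 *                                  int128_make64(0x2000));  // [0x2000,0x4000)
 *     if (addrrange_intersects(a, b)) {
 *         AddrRange c = addrrange_intersection(a, b);
 *         // c covers [0x2000, 0x3000): start 0x2000, size 0x1000
 *     }
 */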
enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)
/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {      \
        .mr = (fr)->mr,                                                \
        .address_space = (as),                                         \
        .offset_within_region = (fr)->offset_in_region,                \
        .size = (fr)->addr.size,                                       \
        .offset_within_address_space = int128_get64((fr)->addr.start), \
        .readonly = (fr)->readonly,                                    \
    }), ##_args)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}
static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}
static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}
static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}
static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}
static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}
static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access)(MemoryRegion *mr,
                                                                   hwaddr addr,
                                                                   uint64_t *value,
                                                                   unsigned size,
                                                                   unsigned shift,
                                                                   uint64_t mask,
                                                                   MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
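/*
 * Worked example (illustrative only): a 4-byte read from a device whose ops
 * declare impl.max_access_size = 1 is split into four 1-byte accessor calls.
 * Here access_size = 1 and access_mask = 0xff, and on a little-endian target
 * the loop issues:
 *
 *     access(mr, addr + 0, value, 1, 0,  0xff, attrs);
 *     access(mr, addr + 1, value, 1, 8,  0xff, attrs);
 *     access(mr, addr + 2, value, 1, 16, 0xff, attrs);
 *     access(mr, addr + 3, value, 1, 24, 0xff, attrs);
 *
 * Each accessor ORs (tmp & 0xff) << shift into *value, reassembling the full
 * 32-bit result.  On a big-endian target the shifts run 24, 16, 8, 0 instead.
 */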
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}
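/*
 * Example (illustrative; "root_mr" and "dev" are hypothetical regions, not
 * names from this file): a 4K terminating region "dev" at offset 0x100
 * inside a container placed at 0x1000 under root_mr flattens to one
 * absolute range:
 *
 *     FlatView *view = generate_memory_topology(root_mr);
 *     // view->ranges[0].mr         == dev
 *     // view->ranges[0].addr.start == int128_make64(0x1100)
 *     // view->ranges[0].addr.size  == int128_make64(0x1000)
 *     flatview_unref(view);   // ref starts at 1, so this destroys the view
 *
 * In this file only address_space_update_topology() builds views this way.
 */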
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}
void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}
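/*
 * Usage sketch (illustrative; "bar_mr" and "new_addr" are hypothetical):
 * batching several layout changes so listeners see one begin/commit cycle
 * and a single flat-view rebuild, instead of one per change:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, false);
 *     memory_region_set_address(bar_mr, new_addr);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();   // one topology update here
 */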
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
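/*
 * Example (illustrative): memory_region_escape_name("pci/mem") returns
 * "pci\x2fmem".  '/' is one of the four characters rejected in QOM path
 * components, and each such byte expands to a 4-byte "\xNN" hex sequence;
 * names with no such characters are returned as a plain copy.
 */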
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const gchar *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}
static int qemu_target_backtrace(target_ulong *array, size_t size)
{
    int n = 0;
#if defined(TARGET_ARM)
    CPUArchState *env = current_cpu->env_ptr;
    array[0] = env->regs[15];
    array[1] = env->regs[14];
    n = 2;
#elif defined(TARGET_MIPS)
    CPUArchState *env = current_cpu->env_ptr;
    array[0] = env->active_tc.PC;
    array[1] = env->active_tc.gpr[31];
    n = 2;
#endif
    return n;
}
#include "disas/disas.h"
const char *qemu_sprint_backtrace(char *buffer, size_t length)
{
    char *p = buffer;
    if (current_cpu) {
        target_ulong caller[2];
        const char *symbol;

        qemu_target_backtrace(caller, 2);
        symbol = lookup_symbol(caller[0]);
        p += sprintf(p, "[%s]", symbol);
        symbol = lookup_symbol(caller[1]);
        p += sprintf(p, "[%s]", symbol);
    } else {
        p += sprintf(p, "[cpu not running]");
    }
    assert((p - buffer) < length);
    return buffer;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem read " TARGET_FMT_plx " %s\n",
                addr, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem write " TARGET_FMT_plx
                " = 0x%" PRIx64 " %s\n",
                addr, val, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}
= {
1178 .valid
.accepts
= unassigned_mem_accepts
,
1179 .endianness
= DEVICE_NATIVE_ENDIAN
,
1182 bool memory_region_access_valid(MemoryRegion
*mr
,
1187 int access_size_min
, access_size_max
;
1190 if (!mr
->ops
->valid
.unaligned
&& (addr
& (size
- 1))) {
1191 fprintf(stderr
, "Misaligned i/o to address %08" HWADDR_PRIx
1192 " with size %u for memory region %s\n",
1193 addr
, size
, mr
->name
);
1197 if (!mr
->ops
->valid
.accepts
) {
1201 access_size_min
= mr
->ops
->valid
.min_access_size
;
1202 if (!mr
->ops
->valid
.min_access_size
) {
1203 access_size_min
= 1;
1206 access_size_max
= mr
->ops
->valid
.max_access_size
;
1207 if (!mr
->ops
->valid
.max_access_size
) {
1208 access_size_max
= 4;
1211 access_size
= MAX(MIN(size
, access_size_max
), access_size_min
);
1212 for (i
= 0; i
< size
; i
+= access_size
) {
1213 if (!mr
->ops
->valid
.accepts(mr
->opaque
, addr
+ i
, access_size
,
1222 static MemTxResult
memory_region_dispatch_read1(MemoryRegion
*mr
,
1230 if (mr
->ops
->read
) {
1231 return access_with_adjusted_size(addr
, pval
, size
,
1232 mr
->ops
->impl
.min_access_size
,
1233 mr
->ops
->impl
.max_access_size
,
1234 memory_region_read_accessor
,
1236 } else if (mr
->ops
->read_with_attrs
) {
1237 return access_with_adjusted_size(addr
, pval
, size
,
1238 mr
->ops
->impl
.min_access_size
,
1239 mr
->ops
->impl
.max_access_size
,
1240 memory_region_read_with_attrs_accessor
,
1243 return access_with_adjusted_size(addr
, pval
, size
, 1, 4,
1244 memory_region_oldmmio_read_accessor
,
1249 MemTxResult
memory_region_dispatch_read(MemoryRegion
*mr
,
1257 if (!memory_region_access_valid(mr
, addr
, size
, false)) {
1258 *pval
= unassigned_mem_read(mr
, addr
, size
);
1259 return MEMTX_DECODE_ERROR
;
1262 r
= memory_region_dispatch_read1(mr
, addr
, pval
, size
, attrs
);
1263 adjust_endianness(mr
, pval
, size
);
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
memory_region_dispatch_write(MemoryRegion
*mr
,
1299 if (!memory_region_access_valid(mr
, addr
, size
, true)) {
1300 unassigned_mem_write(mr
, addr
, data
, size
);
1301 return MEMTX_DECODE_ERROR
;
1304 adjust_endianness(mr
, &data
, size
);
1306 if ((!kvm_eventfds_enabled()) &&
1307 memory_region_dispatch_write_eventfds(mr
, addr
, data
, size
, attrs
)) {
1311 if (mr
->ops
->write
) {
1312 return access_with_adjusted_size(addr
, &data
, size
,
1313 mr
->ops
->impl
.min_access_size
,
1314 mr
->ops
->impl
.max_access_size
,
1315 memory_region_write_accessor
, mr
,
1317 } else if (mr
->ops
->write_with_attrs
) {
1319 access_with_adjusted_size(addr
, &data
, size
,
1320 mr
->ops
->impl
.min_access_size
,
1321 mr
->ops
->impl
.max_access_size
,
1322 memory_region_write_with_attrs_accessor
,
1325 return access_with_adjusted_size(addr
, &data
, size
, 1, 4,
1326 memory_region_oldmmio_write_accessor
,
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
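/*
 * Usage sketch (illustrative only; "mydev" names are hypothetical, not part
 * of this file): a minimal MMIO region backed by read/write callbacks.
 * The .impl fields steer access_with_adjusted_size() above:
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         return 0x42;    // decode addr/size as needed
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 */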
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_set_skip_dump(MemoryRegion *mr)
{
    mr->skip_dump = true;
}
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
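/*
 * Example (illustrative; "alias_mr", "ram_mr", "sysmem" and "dev" are
 * hypothetical): exposing the second half of an 8K RAM region at another
 * guest-physical address through an alias:
 *
 *     memory_region_init_alias(&alias_mr, OBJECT(dev), "ram-hi",
 *                              &ram_mr, 0x1000, 0x1000);
 *     memory_region_add_subregion(sysmem, 0x80000000, &alias_mr);
 *
 * Accesses to [0x80000000, 0x80001000) then land in ram_mr at offset
 * 0x1000; render_memory_region() above resolves the alias whenever the
 * flat view is rebuilt.
 */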
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}
void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    notifier_list_init(&mr->iommu_notify);
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}
*mr
)
1508 /* MMIO callbacks most likely will access data that belongs
1509 * to the owner, hence the need to ref/unref the owner whenever
1510 * the memory region is in use.
1512 * The memory region is a child of its owner. As long as the
1513 * owner doesn't call unparent itself on the memory region,
1514 * ref-ing the owner will also keep the memory region alive.
1515 * Memory regions without an owner are supposed to never go away;
1516 * we do not ref/unref them because it slows down DMA sensibly.
1518 if (mr
&& mr
->owner
) {
1519 object_ref(mr
->owner
);
1523 void memory_region_unref(MemoryRegion
*mr
)
1525 if (mr
&& mr
->owner
) {
1526 object_unref(mr
->owner
);
uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}
bool memory_region_is_skip_dump(MemoryRegion *mr)
{
    return mr->skip_dump;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    if (mr->iommu_ops->notify_started &&
        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
        mr->iommu_ops->notify_started(mr);
    }
    notifier_list_add(&mr->iommu_notify, n);
}

uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
{
    assert(memory_region_is_iommu(mr));
    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
        return mr->iommu_ops->get_min_page_size(mr);
    }
    return TARGET_PAGE_SIZE;
}
void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
{
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    granularity = memory_region_iommu_get_min_page_size(mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}
void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    notifier_remove(n);
    if (mr->iommu_ops->notify_stopped &&
        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
        mr->iommu_ops->notify_stopped(mr);
    }
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    assert(memory_region_is_iommu(mr));
    notifier_list_notify(&mr->iommu_notify, &entry);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void memory_region_set_fd(MemoryRegion *mr, int fd)
{
    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    mr->ram_block->fd = fd;
    rcu_read_unlock();
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}
void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

static bool userspace_eventfd_warning;
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
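/*
 * Usage sketch (illustrative; "vq", "s" and QUEUE_NOTIFY_OFFSET are
 * hypothetical): a virtio-style doorbell, where a 2-byte guest write of
 * vq_index to the notify offset is completed by signalling an eventfd
 * instead of exiting to userspace:
 *
 *     EventNotifier *n = &vq->host_notifier;
 *     event_notifier_init(n, 0);
 *     memory_region_add_eventfd(&s->iomem, QUEUE_NOTIFY_OFFSET, 2,
 *                               true, vq_index, n);
 *     ...
 *     memory_region_del_eventfd(&s->iomem, QUEUE_NOTIFY_OFFSET, 2,
 *                               true, vq_index, n);
 */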
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
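/*
 * Example (illustrative; "sysmem", "ram_mr" and "hole_mr" are hypothetical):
 * overlapping subregions resolved by priority.  With both regions enabled,
 * accesses to the overlap hit "hole" because its priority (1) beats the
 * default 0 given to "ram" by memory_region_add_subregion():
 *
 *     memory_region_add_subregion(sysmem, 0x0, &ram_mr);      // 64K, prio 0
 *     memory_region_add_subregion_overlap(sysmem, 0x8000,
 *                                         &hole_mr, 1);       // 4K,  prio 1
 */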
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
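/*
 * Usage sketch (illustrative; "sysmem" is a hypothetical root region):
 * translating a guest-physical window back to the terminating region and
 * its offset:
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, 0x8000, 4);
 *     if (sec.mr) {
 *         // sec.offset_within_region is the offset inside sec.mr
 *         memory_region_unref(sec.mr);   // drop the reference taken above
 *     }
 */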
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}
void address_space_sync_dirty_bitmap(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
    flatview_unref(view);
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as) {
        return;
    }

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
{
    MemoryListener *other = NULL;
    AddressSpace *as;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        listener_add_address_space(listener, as);
    }
}
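/*
 * Usage sketch (illustrative; "my_region_add" and "my_listener" are
 * hypothetical): a listener that observes every flat range of one address
 * space as the view is (re)built.  Callback fields left NULL are simply
 * skipped by the dispatch macros above:
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         // inspect s->offset_within_address_space, s->size, s->mr ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */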
void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}
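/*
 * Usage sketch (illustrative; "dma_as" and "s->dma_root" are hypothetical):
 * giving a DMA-capable device its own view of memory, rooted at a region of
 * the caller's choosing:
 *
 *     AddressSpace dma_as;
 *     address_space_init(&dma_as, &s->dma_root, "mydev-dma");
 *     ...
 *     address_space_destroy(&dma_as);
 */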
static void do_address_space_destroy(AddressSpace *as)
{
    MemoryListener *listener;
    bool do_free = as->malloced;

    address_space_destroy_dispatch(as);

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        assert(listener->address_space_filter != as);
    }

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}
*address_space_init_shareable(MemoryRegion
*root
, const char *name
)
2332 QTAILQ_FOREACH(as
, &address_spaces
, address_spaces_link
) {
2333 if (root
== as
->root
&& as
->malloced
) {
2339 as
= g_malloc0(sizeof *as
);
2340 address_space_init(as
, root
, name
);
2341 as
->malloced
= true;
2345 void address_space_destroy(AddressSpace
*as
)
2347 MemoryRegion
*root
= as
->root
;
2350 if (as
->ref_count
) {
2353 /* Flush out anything from MemoryListeners listening in on this */
2354 memory_region_transaction_begin();
2356 memory_region_transaction_commit();
2357 QTAILQ_REMOVE(&address_spaces
, as
, address_spaces_link
);
2358 address_space_unregister(as
);
2360 /* At this point, as->dispatch and as->current_map are dummy
2361 * entries that the guest should never use. Wait for the old
2362 * values to expire before freeing the data.
2365 call_rcu(as
, do_address_space_destroy
, rcu
);
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}
void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)