/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qom/object.h"
#include "trace.h"

#include <assert.h>

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/sysemu.h"
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);
typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
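
/*
 * Illustrative example (a sketch, not part of the original code): how the
 * helpers above intersect two ranges.  The values are made up.
 *
 *     AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x3000));
 *     AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x4000));
 *     if (addrrange_intersects(a, b)) {
 *         AddrRange i = addrrange_intersection(a, b);
 *         // i covers [0x2000, 0x4000): i.start == 0x2000, i.size == 0x2000
 *     }
 */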
enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = (fr)->addr.size,                                        \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
              }))

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static void memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_read_accessor(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = mr->ops->read(mr->opaque, addr, size);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                 hwaddr addr,
                                                 uint64_t *value,
                                                 unsigned size,
                                                 unsigned shift,
                                                 uint64_t mask)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
}

static void memory_region_write_accessor(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->write(mr->opaque, addr, tmp, size);
}

static void access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(MemoryRegion *mr,
                                                     hwaddr addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      MemoryRegion *mr)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size,
                   (size - access_size - i) * 8, access_mask);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size, i * 8, access_mask);
        }
    }
}
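
/*
 * Illustrative sketch (not from the original source): a 4-byte access to a
 * device whose MemoryRegionOps sets impl.max_access_size = 2 is split into
 * two 2-byte accessor calls.  With access_size = 2 the mask is 0xffff; on a
 * little-endian target the sub-accesses use shifts 0 and 16, on a big-endian
 * target the shifts are applied in the opposite order:
 *
 *     access(mr, addr + 0, &value, 2, 0,  0xffff);
 *     access(mr, addr + 2, &value, 2, 16, 0xffff);
 */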
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = mr->dirty_log_mask;
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            /* identical, skip.  */
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}
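
/*
 * Illustrative usage (a sketch; the region name is hypothetical): batching
 * several layout changes so that listeners see a single topology update
 * instead of one per call.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(pci_hole, false);
 *     memory_region_set_address(pci_hole, 0xe0000000);
 *     memory_region_set_enabled(pci_hole, true);
 *     memory_region_transaction_commit();
 */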
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_alias(MemoryRegion *mr)
{
    memory_region_unref(mr->alias);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}
static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
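
/*
 * Example (sketch): memory_region_escape_name("pci[00]") yields the string
 * "pci\x5b00\x5d" -- each of '/', '[', '\\' and ']' is replaced by a
 * four-character "\xNN" sequence so QOM path separators cannot appear in a
 * child property name.
 */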
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    if (!owner) {
        owner = qdev_get_machine();
    }

    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);
        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}
static void memory_region_get_addr(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, &value, name, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, &path, name, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const gchar *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v, void *opaque,
                                       const char *name, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, &value, name, errp);
}

static bool memory_region_get_may_overlap(Object *obj, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return mr->may_overlap;
}

static void memory_region_get_size(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, &value, name, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add_bool(OBJECT(mr), "may-overlap",
                             memory_region_get_may_overlap,
                             NULL, /* memory_region_set_may_overlap */
                             &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}
static int qemu_target_backtrace(target_ulong *array, size_t size)
{
    int n = 0;

    if (size >= 2) {
#if defined(TARGET_ARM)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->regs[15];
        array[1] = env->regs[14];
        n = 2;
#elif defined(TARGET_MIPS)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->active_tc.PC;
        array[1] = env->active_tc.gpr[31];
        n = 2;
#endif
    }
    return n;
}

#include "disas/disas.h"
const char *qemu_sprint_backtrace(char *buffer, size_t length)
{
    char *p = buffer;

    if (current_cpu) {
        target_ulong caller[2];
        const char *symbol;

        qemu_target_backtrace(caller, 2);
        symbol = lookup_symbol(caller[0]);
        p += sprintf(p, "[%s]", symbol);
        symbol = lookup_symbol(caller[1]);
        p += sprintf(p, "[%s]", symbol);
    } else {
        p += sprintf(p, "[cpu not running]");
    }
    assert((p - buffer) < length);
    return buffer;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem read " TARGET_FMT_plx " %s\n",
                addr, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem write " TARGET_FMT_plx
                " = 0x%" PRIx64 " %s\n",
                addr, val, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        fprintf(stderr, "Misaligned i/o with size %u for memory region %s\n",
                size, memory_region_name(mr));
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             hwaddr addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (mr->ops->read) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_read_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_read_accessor, mr);
    }

    return data;
}

static bool memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return true;
    }

    *pval = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, pval, size);
    return false;
}

static bool memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return true;
    }

    adjust_endianness(mr, &data, size);

    if (mr->ops->write) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_write_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_write_accessor, mr);
    }
    return false;
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->ram_addr = ~(ram_addr_t)0;
}

void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr, errp);
}
*mr
,
1192 void (*resized
)(const char*,
1197 memory_region_init(mr
, owner
, name
, size
);
1199 mr
->terminates
= true;
1200 mr
->destructor
= memory_region_destructor_ram
;
1201 mr
->ram_addr
= qemu_ram_alloc_resizeable(size
, max_size
, resized
, mr
, errp
);
1205 void memory_region_init_ram_from_file(MemoryRegion
*mr
,
1206 struct Object
*owner
,
1213 memory_region_init(mr
, owner
, name
, size
);
1215 mr
->terminates
= true;
1216 mr
->destructor
= memory_region_destructor_ram
;
1217 mr
->ram_addr
= qemu_ram_alloc_from_file(size
, mr
, share
, path
, errp
);
1221 void memory_region_init_ram_ptr(MemoryRegion
*mr
,
1227 memory_region_init(mr
, owner
, name
, size
);
1229 mr
->terminates
= true;
1230 mr
->destructor
= memory_region_destructor_ram_from_ptr
;
1232 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1233 assert(ptr
!= NULL
);
1234 mr
->ram_addr
= qemu_ram_alloc_from_ptr(size
, ptr
, mr
, &error_abort
);
1237 void memory_region_set_skip_dump(MemoryRegion
*mr
)
1239 mr
->skip_dump
= true;
1242 void memory_region_init_alias(MemoryRegion
*mr
,
1249 memory_region_init(mr
, owner
, name
, size
);
1250 memory_region_ref(orig
);
1251 mr
->destructor
= memory_region_destructor_alias
;
1253 mr
->alias_offset
= offset
;
1256 void memory_region_init_rom_device(MemoryRegion
*mr
,
1258 const MemoryRegionOps
*ops
,
1264 memory_region_init(mr
, owner
, name
, size
);
1266 mr
->opaque
= opaque
;
1267 mr
->terminates
= true;
1268 mr
->rom_device
= true;
1269 mr
->destructor
= memory_region_destructor_rom_device
;
1270 mr
->ram_addr
= qemu_ram_alloc(size
, mr
, errp
);
1273 void memory_region_init_iommu(MemoryRegion
*mr
,
1275 const MemoryRegionIOMMUOps
*ops
,
1279 memory_region_init(mr
, owner
, name
, size
);
1280 mr
->iommu_ops
= ops
,
1281 mr
->terminates
= true; /* then re-forwards */
1282 notifier_list_init(&mr
->iommu_notify
);
1285 void memory_region_init_reservation(MemoryRegion
*mr
,
1290 memory_region_init_io(mr
, owner
, &unassigned_mem_ops
, mr
, name
, size
);
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away,
     * but we still ref/unref them for debugging purposes.
     */
    Object *obj = OBJECT(mr);
    if (obj && obj->parent) {
        object_ref(obj->parent);
    } else {
        object_ref(obj);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    if (obj && obj->parent) {
        object_unref(obj->parent);
    } else {
        object_unref(obj);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_skip_dump(MemoryRegion *mr)
{
    return mr->skip_dump;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

bool memory_region_is_iommu(MemoryRegion *mr)
{
    return mr->iommu_ops;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    notifier_list_add(&mr->iommu_notify, n);
}

void memory_region_unregister_iommu_notifier(Notifier *n)
{
    notifier_remove(n);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    assert(memory_region_is_iommu(mr));
    notifier_list_notify(&mr->iommu_notify, &entry);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size);
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    bool ret;

    assert(mr->terminates);
    ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
    if (ret) {
        cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
    }
    return ret;
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_fd(mr->alias);
    }

    assert(mr->terminates);

    return qemu_get_ram_fd(mr->ram_addr & TARGET_PAGE_MASK);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
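
/*
 * Illustrative usage (sketch; the offset and value are made up): turning a
 * 4-byte doorbell register at offset 0x40 of "mr" into an ioeventfd, so that
 * a guest write of the value 1 is delivered to "notifier" by the listener
 * backend instead of taking the normal MMIO dispatch path.
 *
 *     EventNotifier notifier;
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(mr, 0x40, 4, true, 1, &notifier);
 */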
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    hwaddr offset = subregion->addr;
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_ge(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               memory_region_name(subregion),
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               memory_region_name(other));
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
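
/*
 * Illustrative usage (sketch; names are hypothetical): overlaying a ROM on
 * top of RAM.  Both subregions start at offset 0, but the ROM has the higher
 * priority, so the flattened view shows the ROM and only the non-overlapping
 * remainder of the RAM.
 *
 *     memory_region_add_subregion_overlap(system, 0, ram, 0);
 *     memory_region_add_subregion_overlap(system, 0, rom, 1);
 */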
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr = memory_region_find(container, addr, 1).mr;
    if (!mr || (mr == container)) {
        return false;
    }
    memory_region_unref(mr);
    return true;
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        goto out;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    memory_region_ref(ret.mr);
out:
    rcu_read_unlock();
    return ret;
}
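
/*
 * Illustrative usage (sketch): translating a guest-physical range into the
 * terminating region that implements it.  The caller owns a reference on
 * section.mr and must drop it when done.
 *
 *     MemoryRegionSection section =
 *         memory_region_find(get_system_memory(), 0x10000000, 4);
 *     if (section.mr) {
 *         // section.offset_within_region is the offset inside section.mr
 *         memory_region_unref(section.mr);
 *     }
 */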
void address_space_sync_dirty_bitmap(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
    flatview_unref(view);
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as) {
        return;
    }

    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
{
    MemoryListener *other = NULL;
    AddressSpace *as;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        listener_add_address_space(listener, as);
    }
}
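
/*
 * Illustrative usage (sketch; names are hypothetical): a minimal listener
 * that is told about every range of the system address space, both at
 * registration time (via the region_add replay above) and on later topology
 * changes.
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         // react to a new range: s->offset_within_address_space, s->size
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */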
void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->root = root;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}
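
/*
 * Illustrative usage (sketch; names are hypothetical): giving a DMA-capable
 * device its own view of memory rooted at a region of the board's choosing.
 * The address space takes a reference on the root region and builds its
 * first flat view inside the enclosing memory transaction.
 *
 *     static AddressSpace dma_as;
 *     address_space_init(&dma_as, dma_root_mr, "device-dma");
 */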
static void do_address_space_destroy(AddressSpace *as)
{
    MemoryListener *listener;

    address_space_destroy_dispatch(as);

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        assert(listener->address_space_filter != as);
    }

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_unregister(as);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size)
{
    return memory_region_dispatch_read(mr, addr, pval, size);
}

bool io_mem_write(MemoryRegion *mr, hwaddr addr,
                  uint64_t val, unsigned size)
{
    return memory_region_dispatch_write(mr, addr, val, size);
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr || !mr->enabled) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0));
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr));
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}
void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "%s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head);
    }

    mon_printf(f, "aliases\n");
    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "%s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)