/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Author: Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>
unsigned memory_region_transaction_depth = 0;
typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    int64_t start;
    int64_t size;
};
static AddrRange addrrange_make(int64_t start, int64_t size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}
static int64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}
static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}
static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && (r1.start - r2.start) < r2.size)
        || (r2.start >= r1.start && (r2.start - r1.start) < r1.size);
}
static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    int64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    int64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}
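
/* Worked example of the off-by-one trick above (illustrative, not part of
 * the original file): computing end = start + size directly would overflow
 * for the root clip {0, INT64_MAX}.  Comparing inclusive end points stays
 * in range: for r1 = {0, INT64_MAX} and r2 = {4096, 4096}, start =
 * MAX(0, 4096) = 4096, end = MIN(INT64_MAX - 1, 8191) = 8191, and the
 * result is {4096, 8191 - 4096 + 1} = {4096, 4096}.
 */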
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    int fd;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (a.addr.start < b.addr.start) {
        return true;
    } else if (a.addr.start > b.addr.start) {
        return false;
    } else if (a.addr.size < b.addr.size) {
        return true;
    } else if (a.addr.size > b.addr.size) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.fd < b.fd) {
        return true;
    } else if (a.fd > b.fd) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};
typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops;
    MemoryRegion *root;
    FlatView current_map;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
    void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd);
    void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd);
};
#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}
static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}
static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return addrrange_end(r1->addr) == r2->addr.start
        && r1->mr == r2->mr
        && r1->offset_in_region + r1->addr.size == r2->offset_in_region
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}
static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}
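
/* Worked example (illustrative, not part of the original file): a 4-byte
 * access against a device whose ops declare impl.min_access_size =
 * impl.max_access_size = 2 is split into two 2-byte calls:
 *
 *   access(opaque, addr + 0, value, 2,  0, 0xffff);
 *   access(opaque, addr + 2, value, 2, 16, 0xffff);
 *
 * The read accessor ORs each partial result into *value at its shift; the
 * write accessor extracts (*value >> shift) & mask for each slice.
 */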
static void memory_region_prepare_ram_addr(MemoryRegion *mr);
static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    if (!fr->readable) {
        phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
    }

    if (fr->readonly) {
        phys_offset |= IO_MEM_ROM;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}
static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    if (fr->dirty_log_mask) {
        cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                       fr->addr.start + fr->addr.size);
    }
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}
static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}

static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}
static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 4);

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}
static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
    .ioeventfd_add = as_memory_ioeventfd_add,
    .ioeventfd_del = as_memory_ioeventfd_del,
};

static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};
static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}
static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset + mr->offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset + mr->offset) |
                    (mrp->read(mr->opaque, offset + mr->offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset + mr->offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}
static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange);

    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset + mr->offset, data);
        } else if (width == 2) {
            /* split into two byte writes; we need a write handler here */
            mrp = find_portio(mr, offset, 1, true);
            assert(mrp);
            mrp->write(mr->opaque, offset + mr->offset, data & 0xff);
            mrp->write(mr->opaque, offset + mr->offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset + mr->offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}
static const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
};
static void as_io_range_add(AddressSpace *as, FlatRange *fr)
{
    iorange_init(&fr->mr->iorange, &memory_region_iorange_ops,
                 fr->addr.start, fr->addr.size);
    ioport_register(&fr->mr->iorange);
}

static void as_io_range_del(AddressSpace *as, FlatRange *fr)
{
    isa_unassign_ioport(fr->addr.start, fr->addr.size);
}
static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    assert(fd->match_data && fd->addr.size == 2);

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, true);
    if (r < 0) {
        abort();
    }
}

static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, false);
    if (r < 0) {
        abort();
    }
}
static const AddressSpaceOps address_space_ops_io = {
    .range_add = as_io_range_add,
    .range_del = as_io_range_del,
    .ioeventfd_add = as_io_ioeventfd_add,
    .ioeventfd_del = as_io_ioeventfd_del,
};

static AddressSpace address_space_io = {
    .ops = &address_space_ops_io,
};
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    int64_t remain;
    int64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}
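
/* Worked example (illustrative, not part of the original file): suppose a
 * container holds two sibling subregions, RAM at [0x0, 0x8000) with priority
 * 0 and MMIO at [0x4000, 0x5000) with priority 1.  Subregions are visited in
 * priority order, so MMIO is rendered first and claims [0x4000, 0x5000);
 * RAM then only fills the remaining gaps, yielding the flat ranges
 * [0x0, 0x4000), [0x4000, 0x5000) (MMIO) and [0x5000, 0x8000), the last
 * with offset_in_region = 0x5000.
 */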
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, INT64_MAX), false);
    flatview_simplify(&view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            as->ops->ioeventfd_del(as, &fds_old[iold]);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            as->ops->ioeventfd_add(as, &fds_new[inew]);
            ++inew;
        } else {
            /* Identical entry in both sets: keep it. */
            ++iold;
            ++inew;
        }
    }
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  fr->addr.start - fr->offset_in_region);
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                as->ops->range_del(as, frold);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (adding) {
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    as->ops->log_stop(as, frnew);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    as->ops->log_start(as, frnew);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                as->ops->range_add(as, frnew);
            }

            ++inew;
        }
    }
}
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}
static void memory_region_update_topology(void)
{
    if (memory_region_transaction_depth) {
        return;
    }

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }

    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }
}
void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    memory_region_update_topology();
}
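
/* Usage sketch (illustrative, not part of the original file): batching
 * several map changes so the flat view is rebuilt once instead of once per
 * change; "system", "old_bar" and "new_bar" are hypothetical regions.
 *
 *   memory_region_transaction_begin();
 *   memory_region_del_subregion(system, old_bar);
 *   memory_region_add_subregion(system, new_base, new_bar);
 *   memory_region_transaction_commit();
 *
 * memory_region_update_topology() returns early while the depth is nonzero,
 * so the intermediate state is never rendered.
 */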
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
    cpu_unregister_io_memory(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
    cpu_unregister_io_memory(mr->ram_addr & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
}
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->readable = true;
    mr->readonly = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}
static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat zero as "any size valid", for compatibility */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}
static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr + mr->offset, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}
static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}
static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};
static void memory_region_prepare_ram_addr(MemoryRegion *mr)
{
    if (mr->backend_registered) {
        return;
    }

    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
    mr->backend_registered = true;
}
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   DeviceState *dev,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->ram_addr |= cpu_register_io_memory(memory_region_read_thunk,
                                           memory_region_write_thunk,
                                           mr,
                                           mr->ops->endianness);
    mr->ram_addr |= IO_MEM_ROMD;
    mr->backend_registered = true;
}
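
/* Usage sketch (illustrative, not part of the original file): a board model
 * wiring up RAM and a device; "ram", "dev", "dev_ops", "opaque" and
 * "system_memory" are hypothetical names.
 *
 *   static MemoryRegion ram, dev;
 *
 *   memory_region_init_ram(&ram, NULL, "board.ram", 0x8000000);
 *   memory_region_init_io(&dev, &dev_ops, opaque, "board.dev", 0x1000);
 *   memory_region_add_subregion(system_memory, 0x0, &ram);
 *   memory_region_add_subregion(system_memory, 0x10000000, &dev);
 */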
void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        mr->readonly = readonly;
        memory_region_update_topology();
    }
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        mr->readable = readable;
        memory_region_update_topology();
    }
}
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}
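
/* Usage sketch (illustrative, not part of the original file): a
 * framebuffer-style device marks its MMIO window as coalesced so writes can
 * be batched; "fb" is a hypothetical region.
 *
 *   memory_region_set_coalescing(&fb);                 // entire region, or:
 *   memory_region_add_coalescing(&fb, 0x0, 0x10000);   // just one window
 */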
void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology();
}
void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = addr,
        .addr.size = size,
        .match_data = match_data,
        .data = data,
        .fd = fd,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    /* the + 1 keeps the allocation nonzero when the last entry is removed */
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology();
}
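
/* Usage sketch (illustrative, not part of the original file): a
 * virtio-style doorbell at offset 0x10 of an MMIO region, where 4-byte
 * writes of the value 1 are steered to "fd" instead of taking the slow
 * MMIO path; "dev" and "fd" are hypothetical.
 *
 *   memory_region_add_eventfd(&dev, 0x10, 4, true, 1, fd);
 *   ...
 *   memory_region_del_eventfd(&dev, 0x10, 4, true, 1, fd);
 */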
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->addr + other->size
            || offset + subregion->size <= other->addr) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)other->size,
               other->name);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
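
/* Usage sketch (illustrative, not part of the original file): the classic
 * PC VGA window, where a higher-priority subregion shadows part of a
 * sibling; "system", "ram" and "vga" are hypothetical.
 *
 *   memory_region_add_subregion(system, 0x0, &ram);                // prio 0
 *   memory_region_add_subregion_overlap(system, 0xa0000, &vga, 1); // prio 1
 *
 * The flattened view then shows RAM up to 0xa0000, the VGA window, and RAM
 * again, because render_memory_region() visits subregions in priority order.
 */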
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}
void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology();
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           target_phys_addr_t base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): alias %s @%s "
                   TARGET_FMT_plx "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr + (target_phys_addr_t)mr->size - 1,
                   mr->priority,
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset + (target_phys_addr_t)mr->size - 1);
    } else {
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d): %s\n",
                   base + mr->addr,
                   base + mr->addr + (target_phys_addr_t)mr->size - 1,
                   mr->priority,
                   mr->name);
    }

    QTAILQ_INIT(&submr_print_queue);

    /* collect subregions, sorted by address (then priority) */
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(next_ml, &submr_print_queue, queue, ml) {
        g_free(next_ml);
    }
}
void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;

    QTAILQ_INIT(&ml_head);

    mon_printf(f, "memory\n");
    mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head);

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }

    if (address_space_io.root &&
        !QTAILQ_EMPTY(&address_space_io.root->subregions)) {
        QTAILQ_INIT(&ml_head);
        mon_printf(f, "I/O\n");
        mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head);
    }
}