/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qom/object.h"
#include "trace.h"
#include <assert.h>

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/sysemu.h"

//#define DEBUG_UNASSIGNED

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

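/* Global bookkeeping for the memory API.  All of this state, like the rest
 * of the topology code, is expected to be manipulated under the big QEMU
 * lock (see the "Writes are protected by the BQL" note further down).
 */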
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

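/* Helpers for AddrRange arithmetic.  Int128 is used throughout so that a
 * range covering the whole 64-bit address space (size 2^64) stays
 * representable without overflow.
 */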
static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

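/* Invoke a callback on every registered MemoryListener, walking the list
 * forwards (ascending priority) or backwards depending on _direction.
 */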
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = (fr)->addr.size,                                        \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
    }), ##_args)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

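/* Total order on ioeventfds: by address, then size, then match_data/data,
 * then notifier pointer.  This order is what keeps the per-region and
 * per-address-space ioeventfd arrays sorted, so they can be diffed with a
 * single merge pass.
 */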
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

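/* Two FlatRanges can be merged when they are physically and logically
 * contiguous: adjacent in the address space, backed by the same MemoryRegion
 * at consecutive offsets, and identical in dirty logging, ROMD mode and
 * read-only status.
 */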
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

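/* Split an access that is wider (or narrower) than what the device's ops
 * support into one or more accesses of a size between access_size_min and
 * access_size_max, accumulating the partial results into *value with the
 * shift and mask appropriate for the region's endianness.
 */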
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access)(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            uint64_t *value,
                                                            unsigned size,
                                                            unsigned shift,
                                                            uint64_t mask,
                                                            MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

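/* Walk the old and new flat views in parallel (both are sorted by address)
 * and tell listeners about the difference: region_del for ranges that
 * disappear, region_add for ranges that appear, and region_nop plus
 * log_start/log_stop for ranges that stay but change their dirty log mask.
 * The function is called twice, once with adding == false and once with
 * adding == true, so that all deletions happen before any insertion.
 */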
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

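/* Transactions nest; the topology is only recomputed and pushed to
 * listeners when the outermost transaction commits.  An illustrative
 * sketch (the region names are hypothetical, not part of this file) of
 * batching several changes into a single update:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(old_bar, false);
 *     memory_region_add_subregion(container, offset, new_bar);
 *     memory_region_transaction_commit();
 */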
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

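/* Memory region names become QOM child property names, so characters that
 * are special in QOM paths ('/', '[', ']', '\\') are escaped as \xNN hex
 * sequences, four bytes per escaped character.
 */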
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

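/* Initialize @mr as a QOM object and attach it as a child property of
 * @owner (or of the machine's "/unattached" container when @owner is NULL).
 * A @size of UINT64_MAX is treated as the full 2^64-byte address space.
 */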
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    if (!owner) {
        owner = container_get(qdev_get_machine(), "/unattached");
    }

    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);
        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

static void memory_region_get_addr(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, &value, name, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, &path, name, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v, void *opaque,
                                       const char *name, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, &value, name, errp);
}

static bool memory_region_get_may_overlap(Object *obj, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return mr->may_overlap;
}

static void memory_region_get_size(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, &value, name, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->ram_addr = RAM_ADDR_INVALID;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add_bool(OBJECT(mr), "may-overlap",
                             memory_region_get_may_overlap,
                             NULL, /* memory_region_set_may_overlap */
                             &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

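/* Fallback behaviour for accesses that no region (or no valid access)
 * claims: valid.accepts always refuses, reads return 0, writes are dropped,
 * and when a CPU caused the access it is additionally reported via
 * cpu_unassigned_access().
 */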
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

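/* Entry points for accesses coming from the memory core: invalid accesses
 * go through the unassigned-memory path and come back as MEMTX_DECODE_ERROR;
 * valid ones are dispatched to the region's ops and byte-swapped if the
 * region's endianness does not match the target's.
 */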
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

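/* Typical use from a device model, sketched for illustration only (the
 * device state and ops names below are hypothetical, not part of this file):
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), base, &s->iomem);
 */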
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc_resizeable(size, max_size, resized, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_set_skip_dump(MemoryRegion *mr)
{
    mr->skip_dump = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    notifier_list_init(&mr->iommu_notify);
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away,
     * but we still ref/unref them for debugging purposes.
     */
    Object *obj = OBJECT(mr);
    if (obj && obj->parent) {
        object_ref(obj->parent);
    } else {
        object_ref(obj);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    if (obj && obj->parent) {
        object_unref(obj->parent);
    } else {
        object_unref(obj);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_skip_dump(MemoryRegion *mr)
{
    return mr->skip_dump;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

bool memory_region_is_iommu(MemoryRegion *mr)
{
    return mr->iommu_ops;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    notifier_list_add(&mr->iommu_notify, n);
}

void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
                                hwaddr granularity, bool is_write)
{
    hwaddr addr;
    IOMMUTLBEntry iotlb;

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(Notifier *n)
{
    notifier_remove(n);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    assert(memory_region_is_iommu(mr));
    notifier_list_notify(&mr->iommu_notify, &entry);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_addr != RAM_ADDR_INVALID);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_addr != RAM_ADDR_INVALID);
    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_addr != RAM_ADDR_INVALID);
    return cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr,
                                                    size, client);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_addr != RAM_ADDR_INVALID);
    cpu_physical_memory_test_and_clear_dirty(mr->ram_addr + addr, size,
                                             client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_fd(mr->alias);
    }

    assert(mr->ram_addr != RAM_ADDR_INVALID);

    return qemu_get_ram_fd(mr->ram_addr & TARGET_PAGE_MASK);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->ram_addr != RAM_ADDR_INVALID);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_addr != RAM_ADDR_INVALID);

    qemu_ram_resize(mr->ram_addr, newsize, errp);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

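/* The per-region ioeventfd array is kept sorted according to
 * memory_region_ioeventfd_before(), so insertion below finds the slot with
 * a linear scan and shifts the tail up by one element.
 */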
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    adjust_endianness(mr, &mrfd.data, size);
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    hwaddr offset = subregion->addr;
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_ge(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

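/* Binary-search helpers for looking up the FlatRange covering a given
 * address range; a FlatView's ranges are disjoint and sorted by address,
 * which is what makes bsearch() applicable here.
 */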
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void address_space_sync_dirty_bitmap(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
    flatview_unref(view);
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

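/* Replay the current state of @as to a newly registered listener: a
 * begin/commit pair bracketing one region_add (plus log_start and
 * log_global_start when dirty logging is active) callback per existing
 * FlatRange, so the listener does not miss regions mapped before it
 * attached.
 */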
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as) {
        return;
    }

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
{
    MemoryListener *other = NULL;
    AddressSpace *as;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        listener_add_address_space(listener, as);
    }
}

void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->root = root;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}

static void do_address_space_destroy(AddressSpace *as)
{
    MemoryListener *listener;

    address_space_destroy_dispatch(as);

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        assert(listener->address_space_filter != as);
    }

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_unregister(as);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

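/* Everything below implements the HMP "info mtree" dump: regions are
 * printed depth-first with their priorities and R/W flags, and any aliased
 * regions encountered along the way are queued up and printed afterwards.
 */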
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)