/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
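
/*
 * Note: addrrange_intersection() above assumes the caller has already
 * checked addrrange_intersects(); on disjoint ranges int128_sub(end, start)
 * would yield a negative size.
 */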

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)
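
/*
 * Listener lists are kept sorted by ascending priority (see
 * memory_listener_register()).  Walking Forward for "add"-style callbacks
 * and Reverse for "del"-style callbacks makes paired callbacks nest like
 * constructor/destructor calls.
 */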

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
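
/*
 * memory_region_ioeventfd_before() defines a strict total order over
 * (address, size, match_data, data, notifier), so equality can be tested
 * as "neither sorts before the other".
 */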

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    int has_coalesced_range;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}
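
/*
 * FlatViews are read under RCU, so even after the last counted reference
 * is dropped here a concurrent reader may still hold a pointer; the actual
 * teardown is therefore deferred through call_rcu() above.  flatview_ref()
 * returns false only for a view whose count has already hit zero, which
 * address_space_get_flatview() uses to retry against a replaced map.
 */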

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
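
/*
 * Example: two FlatRanges [0x0,0x1000) and [0x1000,0x2000) backed by the
 * same region at consecutive offsets collapse into a single FlatRange
 * [0x0,0x2000), provided dirty logging, ROMD, readonly and nonvolatile
 * attributes all match (see can_merge() above).
 */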

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
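
/*
 * Example: a 4-byte read of 0x11223344 from a DEVICE_LITTLE_ENDIAN region
 * on a big-endian target is byte-swapped to 0x44332211 here, so the value
 * the CPU sees matches the device's declared endianness.
 */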

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
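
/*
 * Example: an 8-byte access to a device whose .impl.max_access_size is 4
 * is split into two 4-byte calls to access_fn().  For a little-endian
 * region the pieces are shifted by 0 and 32 bits; for a big-endian region
 * the shifts are reversed so the bytes land in the same positions.
 */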

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;
    fr.has_coalesced_range = 0;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
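
/*
 * generate_memory_topology() is the whole pipeline in one place: render
 * the region tree into disjoint FlatRanges, merge neighbours, then build
 * and compact the dispatch structure used for address lookup on the hot
 * path.
 */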

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    if (!fr->has_coalesced_range) {
        return;
    }

    if (--fr->has_coalesced_range > 0) {
        return;
    }

    MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                  int128_get64(fr->addr.start),
                                  int128_get64(fr->addr.size));
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    if (fr->has_coalesced_range++) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        tmp = addrrange_shift(cmr->addr,
                              int128_sub(fr->addr.start,
                                         int128_make64(fr->offset_in_region)));
        if (!addrrange_intersects(tmp, fr->addr)) {
            continue;
        }
        tmp = addrrange_intersection(tmp, fr->addr);
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}
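
/*
 * address_space_set_flatview() below runs this pass twice: first with
 * adding == false to tear down ranges that vanish from the view, then with
 * adding == true to instantiate the new ones, so listeners never see a
 * mapping and its replacement active at the same time.
 */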

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
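
/*
 * Transactions nest; flat views are only rebuilt when the outermost commit
 * brings the depth back to zero.  A typical batched update looks like
 * (sketch):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr, false);
 *     memory_region_set_address(mr, new_base);
 *     memory_region_set_enabled(mr, true);
 *     memory_region_transaction_commit();
 *
 * so listeners observe one topology change instead of three.
 */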

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
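
/*
 * For example, memory_region_escape_name("pci/mem") returns "pci\x2fmem",
 * keeping QOM path components free of separator characters.
 */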

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
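
/*
 * Typical MMIO usage (a minimal sketch; "mydev_ops", mydev_read/mydev_write
 * and the 0x1000 size are hypothetical, not part of this file):
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *
 * Accesses narrower or wider than .impl allows are split or combined by
 * access_with_adjusted_size() above.
 */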

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
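
/*
 * Example (sketch): expose the first 1MB of "ram" a second time at 4GB:
 *
 *     memory_region_init_alias(&lowmem_alias, owner, "lowmem-alias",
 *                              ram, 0, 0x100000);
 *     memory_region_add_subregion(system_memory, 0x100000000ULL,
 *                                 &lowmem_alias);
 *
 * The alias has no storage of its own; render_memory_region() above
 * redirects it to @orig at @offset.
 */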

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}
1801 uint64_t memory_region_size(MemoryRegion *mr)
1803 if (int128_eq(mr->size, int128_2_64())) {
1804 return UINT64_MAX;
1806 return int128_get64(mr->size);
1809 const char *memory_region_name(const MemoryRegion *mr)
1811 if (!mr->name) {
1812 ((MemoryRegion *)mr)->name =
1813 object_get_canonical_path_component(OBJECT(mr));
1815 return mr->name;
1818 bool memory_region_is_ram_device(MemoryRegion *mr)
1820 return mr->ram_device;
1823 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1825 uint8_t mask = mr->dirty_log_mask;
1826 if (global_dirty_log && mr->ram_block) {
1827 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1829 return mask;
1832 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1834 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1837 static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
1839 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1840 IOMMUNotifier *iommu_notifier;
1841 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1843 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1844 flags |= iommu_notifier->notifier_flags;
1847 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1848 imrc->notify_flag_changed(iommu_mr,
1849 iommu_mr->iommu_notify_flags,
1850 flags);
1853 iommu_mr->iommu_notify_flags = flags;
1856 void memory_region_register_iommu_notifier(MemoryRegion *mr,
1857 IOMMUNotifier *n)
1859 IOMMUMemoryRegion *iommu_mr;
1861 if (mr->alias) {
1862 memory_region_register_iommu_notifier(mr->alias, n);
1863 return;
1866 /* We need to register for at least one bitfield */
1867 iommu_mr = IOMMU_MEMORY_REGION(mr);
1868 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
1869 assert(n->start <= n->end);
1870 assert(n->iommu_idx >= 0 &&
1871 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1873 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1874 memory_region_update_iommu_notify_flags(iommu_mr);
1877 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
1879 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1881 if (imrc->get_min_page_size) {
1882 return imrc->get_min_page_size(iommu_mr);
1884 return TARGET_PAGE_SIZE;
1887 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
1889 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
1890 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1891 hwaddr addr, granularity;
1892 IOMMUTLBEntry iotlb;
1894 /* If the IOMMU has its own replay callback, override */
1895 if (imrc->replay) {
1896 imrc->replay(iommu_mr, n);
1897 return;
1900 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
1902 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
1903 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
1904 if (iotlb.perm != IOMMU_NONE) {
1905 n->notify(n, &iotlb);
1908 /* if (2^64 - MR size) < granularity, it's possible to get an
1909 * infinite loop here. This should catch such a wraparound */
1910 if ((addr + granularity) < addr) {
1911 break;
1916 void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
1918 IOMMUNotifier *notifier;
1920 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1921 memory_region_iommu_replay(iommu_mr, notifier);
1925 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1926 IOMMUNotifier *n)
1928 IOMMUMemoryRegion *iommu_mr;
1930 if (mr->alias) {
1931 memory_region_unregister_iommu_notifier(mr->alias, n);
1932 return;
1934 QLIST_REMOVE(n, node);
1935 iommu_mr = IOMMU_MEMORY_REGION(mr);
1936 memory_region_update_iommu_notify_flags(iommu_mr);
1939 void memory_region_notify_one(IOMMUNotifier *notifier,
1940 IOMMUTLBEntry *entry)
1942 IOMMUNotifierFlag request_flags;
1945 * Skip the notification if the notification does not overlap
1946 * with registered range.
1948 if (notifier->start > entry->iova + entry->addr_mask ||
1949 notifier->end < entry->iova) {
1950 return;
1953 if (entry->perm & IOMMU_RW) {
1954 request_flags = IOMMU_NOTIFIER_MAP;
1955 } else {
1956 request_flags = IOMMU_NOTIFIER_UNMAP;
1959 if (notifier->notifier_flags & request_flags) {
1960 notifier->notify(notifier, entry);
1964 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1965 int iommu_idx,
1966 IOMMUTLBEntry entry)
1968 IOMMUNotifier *iommu_notifier;
1970 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
1972 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1973 if (iommu_notifier->iommu_idx == iommu_idx) {
1974 memory_region_notify_one(iommu_notifier, &entry);
1979 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1980 enum IOMMUMemoryRegionAttr attr,
1981 void *data)
1983 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1985 if (!imrc->get_attr) {
1986 return -EINVAL;
1989 return imrc->get_attr(iommu_mr, attr, data);
1992 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1993 MemTxAttrs attrs)
1995 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1997 if (!imrc->attrs_to_index) {
1998 return 0;
2001 return imrc->attrs_to_index(iommu_mr, attrs);
2004 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2006 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2008 if (!imrc->num_indexes) {
2009 return 1;
2012 return imrc->num_indexes(iommu_mr);
2015 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2017 uint8_t mask = 1 << client;
2018 uint8_t old_logging;
2020 assert(client == DIRTY_MEMORY_VGA);
2021 old_logging = mr->vga_logging_count;
2022 mr->vga_logging_count += log ? 1 : -1;
2023 if (!!old_logging == !!mr->vga_logging_count) {
2024 return;
2027 memory_region_transaction_begin();
2028 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2029 memory_region_update_pending |= mr->enabled;
2030 memory_region_transaction_commit();
2033 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2034 hwaddr size)
2036 assert(mr->ram_block);
2037 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2038 size,
2039 memory_region_get_dirty_log_mask(mr));
2042 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
2044 MemoryListener *listener;
2045 AddressSpace *as;
2046 FlatView *view;
2047 FlatRange *fr;
2049 /* If the same address space has multiple log_sync listeners, we
2050 * visit that address space's FlatView multiple times. But because
2051 * log_sync listeners are rare, it's still cheaper than walking each
2052 * address space once.
2054 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2055 if (!listener->log_sync) {
2056 continue;
2058 as = listener->address_space;
2059 view = address_space_get_flatview(as);
2060 FOR_EACH_FLAT_RANGE(fr, view) {
2061 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2062 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2063 listener->log_sync(listener, &mrs);
2066 flatview_unref(view);
2070 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2071 hwaddr len)
2073 MemoryRegionSection mrs;
2074 MemoryListener *listener;
2075 AddressSpace *as;
2076 FlatView *view;
2077 FlatRange *fr;
2078 hwaddr sec_start, sec_end, sec_size;
2080 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2081 if (!listener->log_clear) {
2082 continue;
2084 as = listener->address_space;
2085 view = address_space_get_flatview(as);
2086 FOR_EACH_FLAT_RANGE(fr, view) {
2087 if (!fr->dirty_log_mask || fr->mr != mr) {
2089 * Clear dirty bitmap operation only applies to those
2090 * regions whose dirty logging is at least enabled
2092 continue;
2095 mrs = section_from_flat_range(fr, view);
2097 sec_start = MAX(mrs.offset_within_region, start);
2098 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2099 sec_end = MIN(sec_end, start + len);
2101 if (sec_start >= sec_end) {
2103 * If this memory region section has no intersection
2104 * with the requested range, skip.
2106 continue;
2109 /* Valid case; shrink the section if needed */
2110 mrs.offset_within_address_space +=
2111 sec_start - mrs.offset_within_region;
2112 mrs.offset_within_region = sec_start;
2113 sec_size = sec_end - sec_start;
2114 mrs.size = int128_make64(sec_size);
2115 listener->log_clear(listener, &mrs);
2117 flatview_unref(view);
2121 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2122 hwaddr addr,
2123 hwaddr size,
2124 unsigned client)
2126 assert(mr->ram_block);
2127 memory_region_sync_dirty_bitmap(mr);
2128 return cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2131 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2132 hwaddr addr, hwaddr size)
2134 assert(mr->ram_block);
2135 return cpu_physical_memory_snapshot_get_dirty(snap,
2136 memory_region_get_ram_addr(mr) + addr, size);
2139 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2141 if (mr->readonly != readonly) {
2142 memory_region_transaction_begin();
2143 mr->readonly = readonly;
2144 memory_region_update_pending |= mr->enabled;
2145 memory_region_transaction_commit();
2149 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2151 if (mr->nonvolatile != nonvolatile) {
2152 memory_region_transaction_begin();
2153 mr->nonvolatile = nonvolatile;
2154 memory_region_update_pending |= mr->enabled;
2155 memory_region_transaction_commit();
2159 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2161 if (mr->romd_mode != romd_mode) {
2162 memory_region_transaction_begin();
2163 mr->romd_mode = romd_mode;
2164 memory_region_update_pending |= mr->enabled;
2165 memory_region_transaction_commit();
2169 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2170 hwaddr size, unsigned client)
2172 assert(mr->ram_block);
2173 cpu_physical_memory_test_and_clear_dirty(
2174 memory_region_get_ram_addr(mr) + addr, size, client);
2177 int memory_region_get_fd(MemoryRegion *mr)
2179 int fd;
2181 rcu_read_lock();
2182 while (mr->alias) {
2183 mr = mr->alias;
2185 fd = mr->ram_block->fd;
2186 rcu_read_unlock();
2188 return fd;
2191 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2193 void *ptr;
2194 uint64_t offset = 0;
2196 rcu_read_lock();
2197 while (mr->alias) {
2198 offset += mr->alias_offset;
2199 mr = mr->alias;
2201 assert(mr->ram_block);
2202 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
2203 rcu_read_unlock();
2205 return ptr;
2208 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2210 RAMBlock *block;
2212 block = qemu_ram_block_from_host(ptr, false, offset);
2213 if (!block) {
2214 return NULL;
2217 return block->mr;
2218 }
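/*
 * Editorial sketch (not part of the original file): round trip between
 * a (non-alias) RAM region and its host mapping using the two helpers
 * above. memory_region_get_ram_ptr() asserts that the region, or its
 * alias target, is RAM-backed. Names are hypothetical.
 */
static void G_GNUC_UNUSED example_host_round_trip(MemoryRegion *ram)
{
    void *host = memory_region_get_ram_ptr(ram);
    ram_addr_t offset;
    MemoryRegion *found = memory_region_from_host(host, &offset);

    assert(found == ram && offset == 0);
}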
2220 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2222 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2225 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2227 assert(mr->ram_block);
2229 qemu_ram_resize(mr->ram_block, newsize, errp);
2232 static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
2234 FlatView *view;
2235 FlatRange *fr;
2237 view = address_space_get_flatview(as);
2238 FOR_EACH_FLAT_RANGE(fr, view) {
2239 if (fr->mr == mr) {
2240 flat_range_coalesced_io_del(fr, as);
2241 flat_range_coalesced_io_add(fr, as);
2244 flatview_unref(view);
2247 static void memory_region_update_coalesced_range(MemoryRegion *mr)
2249 AddressSpace *as;
2251 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2252 memory_region_update_coalesced_range_as(mr, as);
2256 void memory_region_set_coalescing(MemoryRegion *mr)
2258 memory_region_clear_coalescing(mr);
2259 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2262 void memory_region_add_coalescing(MemoryRegion *mr,
2263 hwaddr offset,
2264 uint64_t size)
2266 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2268 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2269 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2270 memory_region_update_coalesced_range(mr);
2271 memory_region_set_flush_coalesced(mr);
2272 }
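/*
 * Editorial sketch (not part of the original file): marking part of an
 * MMIO region as coalesced so that writes can be buffered (e.g. by KVM)
 * and flushed in batches. The region name and the 0x100-byte window are
 * hypothetical.
 */
static void G_GNUC_UNUSED example_enable_coalescing(MemoryRegion *mmio)
{
    /* coalesce only the first 0x100 bytes of the region... */
    memory_region_add_coalescing(mmio, 0, 0x100);
    /* ...or coalesce all of it: memory_region_set_coalescing(mmio); */
}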
2274 void memory_region_clear_coalescing(MemoryRegion *mr)
2276 CoalescedMemoryRange *cmr;
2277 bool updated = false;
2279 qemu_flush_coalesced_mmio_buffer();
2280 mr->flush_coalesced_mmio = false;
2282 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2283 cmr = QTAILQ_FIRST(&mr->coalesced);
2284 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2285 g_free(cmr);
2286 updated = true;
2289 if (updated) {
2290 memory_region_update_coalesced_range(mr);
2294 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2296 mr->flush_coalesced_mmio = true;
2299 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2301 qemu_flush_coalesced_mmio_buffer();
2302 if (QTAILQ_EMPTY(&mr->coalesced)) {
2303 mr->flush_coalesced_mmio = false;
2307 void memory_region_clear_global_locking(MemoryRegion *mr)
2309 mr->global_locking = false;
2312 static bool userspace_eventfd_warning;
2314 void memory_region_add_eventfd(MemoryRegion *mr,
2315 hwaddr addr,
2316 unsigned size,
2317 bool match_data,
2318 uint64_t data,
2319 EventNotifier *e)
2321 MemoryRegionIoeventfd mrfd = {
2322 .addr.start = int128_make64(addr),
2323 .addr.size = int128_make64(size),
2324 .match_data = match_data,
2325 .data = data,
2326 .e = e,
2328 unsigned i;
2330 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2331 userspace_eventfd_warning))) {
2332 userspace_eventfd_warning = true;
2333 error_report("Using eventfd without MMIO binding in KVM. "
2334 "Suboptimal performance expected");
2337 if (size) {
2338 adjust_endianness(mr, &mrfd.data, size);
2340 memory_region_transaction_begin();
2341 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2342 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2343 break;
2346 ++mr->ioeventfd_nb;
2347 mr->ioeventfds = g_realloc(mr->ioeventfds,
2348 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2349 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2350 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2351 mr->ioeventfds[i] = mrfd;
2352 ioeventfd_update_pending |= mr->enabled;
2353 memory_region_transaction_commit();
2354 }
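/*
 * Editorial sketch (not part of the original file): a virtio-style
 * doorbell, where a 2-byte guest write of a specific value to a notify
 * register kicks an EventNotifier instead of trapping into the device
 * model. The register offset and the matched value are hypothetical.
 */
static void G_GNUC_UNUSED example_wire_doorbell(MemoryRegion *mmio,
                                                EventNotifier *kick)
{
    /* match_data=true: fire only when the guest writes the value 0 */
    memory_region_add_eventfd(mmio, 0x50 /* notify reg */, 2,
                              true, 0, kick);
}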
2356 void memory_region_del_eventfd(MemoryRegion *mr,
2357 hwaddr addr,
2358 unsigned size,
2359 bool match_data,
2360 uint64_t data,
2361 EventNotifier *e)
2363 MemoryRegionIoeventfd mrfd = {
2364 .addr.start = int128_make64(addr),
2365 .addr.size = int128_make64(size),
2366 .match_data = match_data,
2367 .data = data,
2368 .e = e,
2370 unsigned i;
2372 if (size) {
2373 adjust_endianness(mr, &mrfd.data, size);
2375 memory_region_transaction_begin();
2376 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2377 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2378 break;
2381 assert(i != mr->ioeventfd_nb);
2382 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2383 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2384 --mr->ioeventfd_nb;
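/*
 * Editorial note: the "+ 1" below keeps the requested size non-zero
 * when ioeventfd_nb drops to 0, since g_realloc(ptr, 0) would free the
 * array and return NULL.
 */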
2385 mr->ioeventfds = g_realloc(mr->ioeventfds,
2386 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb + 1);
2387 ioeventfd_update_pending |= mr->enabled;
2388 memory_region_transaction_commit();
2391 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2393 MemoryRegion *mr = subregion->container;
2394 MemoryRegion *other;
2396 memory_region_transaction_begin();
2398 memory_region_ref(subregion);
2399 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2400 if (subregion->priority >= other->priority) {
2401 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2402 goto done;
2405 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2406 done:
2407 memory_region_update_pending |= mr->enabled && subregion->enabled;
2408 memory_region_transaction_commit();
2411 static void memory_region_add_subregion_common(MemoryRegion *mr,
2412 hwaddr offset,
2413 MemoryRegion *subregion)
2415 assert(!subregion->container);
2416 subregion->container = mr;
2417 subregion->addr = offset;
2418 memory_region_update_container_subregions(subregion);
2421 void memory_region_add_subregion(MemoryRegion *mr,
2422 hwaddr offset,
2423 MemoryRegion *subregion)
2425 subregion->priority = 0;
2426 memory_region_add_subregion_common(mr, offset, subregion);
2429 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2430 hwaddr offset,
2431 MemoryRegion *subregion,
2432 int priority)
2434 subregion->priority = priority;
2435 memory_region_add_subregion_common(mr, offset, subregion);
2436 }
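/*
 * Editorial sketch (not part of the original file): layering regions in
 * a container with the two calls above. A higher-priority subregion
 * shadows lower-priority ones wherever they overlap. Names and
 * addresses are hypothetical.
 */
static void G_GNUC_UNUSED example_layering(MemoryRegion *sysmem,
                                           MemoryRegion *ram,
                                           MemoryRegion *mmio_window)
{
    memory_region_add_subregion(sysmem, 0x0, ram);
    /* priority 1 beats the default 0, so the window wins on overlap */
    memory_region_add_subregion_overlap(sysmem, 0x10000000,
                                        mmio_window, 1);
}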
2438 void memory_region_del_subregion(MemoryRegion *mr,
2439 MemoryRegion *subregion)
2441 memory_region_transaction_begin();
2442 assert(subregion->container == mr);
2443 subregion->container = NULL;
2444 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2445 memory_region_unref(subregion);
2446 memory_region_update_pending |= mr->enabled && subregion->enabled;
2447 memory_region_transaction_commit();
2450 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2452 if (enabled == mr->enabled) {
2453 return;
2455 memory_region_transaction_begin();
2456 mr->enabled = enabled;
2457 memory_region_update_pending = true;
2458 memory_region_transaction_commit();
2461 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2463 Int128 s = int128_make64(size);
2465 if (size == UINT64_MAX) {
2466 s = int128_2_64();
2468 if (int128_eq(s, mr->size)) {
2469 return;
2471 memory_region_transaction_begin();
2472 mr->size = s;
2473 memory_region_update_pending = true;
2474 memory_region_transaction_commit();
2477 static void memory_region_readd_subregion(MemoryRegion *mr)
2479 MemoryRegion *container = mr->container;
2481 if (container) {
2482 memory_region_transaction_begin();
2483 memory_region_ref(mr);
2484 memory_region_del_subregion(container, mr);
2485 mr->container = container;
2486 memory_region_update_container_subregions(mr);
2487 memory_region_unref(mr);
2488 memory_region_transaction_commit();
2492 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2494 if (addr != mr->addr) {
2495 mr->addr = addr;
2496 memory_region_readd_subregion(mr);
2500 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2502 assert(mr->alias);
2504 if (offset == mr->alias_offset) {
2505 return;
2508 memory_region_transaction_begin();
2509 mr->alias_offset = offset;
2510 memory_region_update_pending |= mr->enabled;
2511 memory_region_transaction_commit();
2512 }
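/*
 * Editorial sketch (not part of the original file): ROM bank switching
 * through a movable alias, a common use of the setter above. The alias
 * is assumed to have been created with memory_region_init_alias() at
 * board init; BANK_SIZE and the names are hypothetical.
 */
static void G_GNUC_UNUSED example_switch_bank(MemoryRegion *bank_alias,
                                              unsigned bank)
{
    const hwaddr BANK_SIZE = 16 * 1024;

    /* retarget the alias to a different window of the backing ROM */
    memory_region_set_alias_offset(bank_alias, bank * BANK_SIZE);
}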
2514 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2516 return mr->align;
2519 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2521 const AddrRange *addr = addr_;
2522 const FlatRange *fr = fr_;
2524 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2525 return -1;
2526 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2527 return 1;
2529 return 0;
2532 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2534 return bsearch(&addr, view->ranges, view->nr,
2535 sizeof(FlatRange), cmp_flatrange_addr);
2538 bool memory_region_is_mapped(MemoryRegion *mr)
2540 return mr->container ? true : false;
2543 /* Same as memory_region_find, but it does not add a reference to the
2544 * returned region. It must be called from an RCU critical section.
2545 */
2546 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2547 hwaddr addr, uint64_t size)
2549 MemoryRegionSection ret = { .mr = NULL };
2550 MemoryRegion *root;
2551 AddressSpace *as;
2552 AddrRange range;
2553 FlatView *view;
2554 FlatRange *fr;
2556 addr += mr->addr;
2557 for (root = mr; root->container; ) {
2558 root = root->container;
2559 addr += root->addr;
2562 as = memory_region_to_address_space(root);
2563 if (!as) {
2564 return ret;
2566 range = addrrange_make(int128_make64(addr), int128_make64(size));
2568 view = address_space_to_flatview(as);
2569 fr = flatview_lookup(view, range);
2570 if (!fr) {
2571 return ret;
2574 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2575 --fr;
2578 ret.mr = fr->mr;
2579 ret.fv = view;
2580 range = addrrange_intersection(range, fr->addr);
2581 ret.offset_within_region = fr->offset_in_region;
2582 ret.offset_within_region += int128_get64(int128_sub(range.start,
2583 fr->addr.start));
2584 ret.size = range.size;
2585 ret.offset_within_address_space = int128_get64(range.start);
2586 ret.readonly = fr->readonly;
2587 ret.nonvolatile = fr->nonvolatile;
2588 return ret;
2591 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2592 hwaddr addr, uint64_t size)
2594 MemoryRegionSection ret;
2595 rcu_read_lock();
2596 ret = memory_region_find_rcu(mr, addr, size);
2597 if (ret.mr) {
2598 memory_region_ref(ret.mr);
2600 rcu_read_unlock();
2601 return ret;
2602 }
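/*
 * Editorial sketch (not part of the original file): querying what is
 * mapped at an offset inside a container region. memory_region_find()
 * takes a reference on the returned region, so the caller must drop it.
 * Names are hypothetical.
 */
static void G_GNUC_UNUSED example_probe_mapping(MemoryRegion *sysmem,
                                                hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);

    if (sec.mr) {
        /* sec.offset_within_region locates addr inside sec.mr */
        memory_region_unref(sec.mr);
    }
}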
2604 bool memory_region_present(MemoryRegion *container, hwaddr addr)
2606 MemoryRegion *mr;
2608 rcu_read_lock();
2609 mr = memory_region_find_rcu(container, addr, 1).mr;
2610 rcu_read_unlock();
2611 return mr && mr != container;
2614 void memory_global_dirty_log_sync(void)
2616 memory_region_sync_dirty_bitmap(NULL);
2619 static VMChangeStateEntry *vmstate_change;
2621 void memory_global_dirty_log_start(void)
2623 if (vmstate_change) {
2624 qemu_del_vm_change_state_handler(vmstate_change);
2625 vmstate_change = NULL;
2628 global_dirty_log = true;
2630 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2632 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
2633 memory_region_transaction_begin();
2634 memory_region_update_pending = true;
2635 memory_region_transaction_commit();
2638 static void memory_global_dirty_log_do_stop(void)
2640 global_dirty_log = false;
2642 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
2643 memory_region_transaction_begin();
2644 memory_region_update_pending = true;
2645 memory_region_transaction_commit();
2647 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2650 static void memory_vm_change_state_handler(void *opaque, int running,
2651 RunState state)
2653 if (running) {
2654 memory_global_dirty_log_do_stop();
2656 if (vmstate_change) {
2657 qemu_del_vm_change_state_handler(vmstate_change);
2658 vmstate_change = NULL;
2663 void memory_global_dirty_log_stop(void)
2665 if (!runstate_is_running()) {
2666 if (vmstate_change) {
2667 return;
2669 vmstate_change = qemu_add_vm_change_state_handler(
2670 memory_vm_change_state_handler, NULL);
2671 return;
2674 memory_global_dirty_log_do_stop();
2675 }
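/*
 * Editorial sketch (not part of the original file): the global dirty
 * log cycle as a migration-like user would drive it. Note that
 * memory_global_dirty_log_stop() above defers the actual stop to a
 * VM-state change handler when the VM is not currently running.
 */
static void G_GNUC_UNUSED example_dirty_log_cycle(void)
{
    memory_global_dirty_log_start();
    /* ...let the guest run, then pull the accumulated bits: */
    memory_global_dirty_log_sync();
    /* ...consume the bitmap via the ram_addr.h helpers... */
    memory_global_dirty_log_stop();
}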
2677 static void listener_add_address_space(MemoryListener *listener,
2678 AddressSpace *as)
2680 FlatView *view;
2681 FlatRange *fr;
2683 if (listener->begin) {
2684 listener->begin(listener);
2686 if (global_dirty_log) {
2687 if (listener->log_global_start) {
2688 listener->log_global_start(listener);
2692 view = address_space_get_flatview(as);
2693 FOR_EACH_FLAT_RANGE(fr, view) {
2694 MemoryRegionSection section = section_from_flat_range(fr, view);
2696 if (listener->region_add) {
2697 listener->region_add(listener, &section);
2699 if (fr->dirty_log_mask && listener->log_start) {
2700 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2703 if (listener->commit) {
2704 listener->commit(listener);
2706 flatview_unref(view);
2709 static void listener_del_address_space(MemoryListener *listener,
2710 AddressSpace *as)
2712 FlatView *view;
2713 FlatRange *fr;
2715 if (listener->begin) {
2716 listener->begin(listener);
2718 view = address_space_get_flatview(as);
2719 FOR_EACH_FLAT_RANGE(fr, view) {
2720 MemoryRegionSection section = section_from_flat_range(fr, view);
2722 if (fr->dirty_log_mask && listener->log_stop) {
2723 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2725 if (listener->region_del) {
2726 listener->region_del(listener, &section);
2729 if (listener->commit) {
2730 listener->commit(listener);
2732 flatview_unref(view);
2735 void memory_listener_register(MemoryListener *listener, AddressSpace *as)
2737 MemoryListener *other = NULL;
2739 listener->address_space = as;
2740 if (QTAILQ_EMPTY(&memory_listeners)
2741 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
2742 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2743 } else {
2744 QTAILQ_FOREACH(other, &memory_listeners, link) {
2745 if (listener->priority < other->priority) {
2746 break;
2749 QTAILQ_INSERT_BEFORE(other, listener, link);
2752 if (QTAILQ_EMPTY(&as->listeners)
2753 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
2754 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2755 } else {
2756 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2757 if (listener->priority < other->priority) {
2758 break;
2761 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2764 listener_add_address_space(listener, as);
2767 void memory_listener_unregister(MemoryListener *listener)
2769 if (!listener->address_space) {
2770 return;
2773 listener_del_address_space(listener, listener->address_space);
2774 QTAILQ_REMOVE(&memory_listeners, listener, link);
2775 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
2776 listener->address_space = NULL;
2777 }
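/*
 * Editorial sketch (not part of the original file): a minimal
 * MemoryListener watching the system memory address space. The callback
 * choice and priority are hypothetical; note that registration replays
 * region_add against the already-existing topology.
 */
static void example_region_add(MemoryListener *l, MemoryRegionSection *s)
{
    qemu_printf("mapped %s\n", memory_region_name(s->mr));
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

static void G_GNUC_UNUSED example_register(void)
{
    memory_listener_register(&example_listener, &address_space_memory);
    /* ...and memory_listener_unregister(&example_listener) to detach */
}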
2779 void address_space_remove_listeners(AddressSpace *as)
2781 while (!QTAILQ_EMPTY(&as->listeners)) {
2782 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
2786 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
2788 memory_region_ref(root);
2789 as->root = root;
2790 as->current_map = NULL;
2791 as->ioeventfd_nb = 0;
2792 as->ioeventfds = NULL;
2793 QTAILQ_INIT(&as->listeners);
2794 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
2795 as->name = g_strdup(name ? name : "anonymous");
2796 address_space_update_topology(as);
2797 address_space_update_ioeventfds(as);
2798 }
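/*
 * Editorial sketch (not part of the original file): giving a DMA-capable
 * device its own address space rooted at a board-provided region, a
 * pattern used by various bus masters. Names are hypothetical.
 */
static void G_GNUC_UNUSED example_create_dma_as(AddressSpace *dma_as,
                                                MemoryRegion *dma_root)
{
    address_space_init(dma_as, dma_root, "example-dma");
    /* ...issue transfers with address_space_rw()/address_space_map(), */
    /* then tear down with address_space_destroy(dma_as) when done */
}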
2800 static void do_address_space_destroy(AddressSpace *as)
2802 assert(QTAILQ_EMPTY(&as->listeners));
2804 flatview_unref(as->current_map);
2805 g_free(as->name);
2806 g_free(as->ioeventfds);
2807 memory_region_unref(as->root);
2810 void address_space_destroy(AddressSpace *as)
2812 MemoryRegion *root = as->root;
2814 /* Flush out anything from MemoryListeners listening in on this */
2815 memory_region_transaction_begin();
2816 as->root = NULL;
2817 memory_region_transaction_commit();
2818 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2820 /* At this point, as->dispatch and as->current_map are dummy
2821 * entries that the guest should never use. Wait for the old
2822 * values to expire before freeing the data.
2823 */
2824 as->root = root;
2825 call_rcu(as, do_address_space_destroy, rcu);
2828 static const char *memory_region_type(MemoryRegion *mr)
2830 if (memory_region_is_ram_device(mr)) {
2831 return "ramd";
2832 } else if (memory_region_is_romd(mr)) {
2833 return "romd";
2834 } else if (memory_region_is_rom(mr)) {
2835 return "rom";
2836 } else if (memory_region_is_ram(mr)) {
2837 return "ram";
2838 } else {
2839 return "i/o";
2843 typedef struct MemoryRegionList MemoryRegionList;
2845 struct MemoryRegionList {
2846 const MemoryRegion *mr;
2847 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
2850 typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
2852 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2853 int128_sub((size), int128_one())) : 0)
2854 #define MTREE_INDENT " "
2856 static void mtree_expand_owner(const char *label, Object *obj)
2858 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2860 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
2861 if (dev && dev->id) {
2862 qemu_printf(" id=%s", dev->id);
2863 } else {
2864 gchar *canonical_path = object_get_canonical_path(obj);
2865 if (canonical_path) {
2866 qemu_printf(" path=%s", canonical_path);
2867 g_free(canonical_path);
2868 } else {
2869 qemu_printf(" type=%s", object_get_typename(obj));
2872 qemu_printf("}");
2875 static void mtree_print_mr_owner(const MemoryRegion *mr)
2877 Object *owner = mr->owner;
2878 Object *parent = memory_region_owner((MemoryRegion *)mr);
2880 if (!owner && !parent) {
2881 qemu_printf(" orphan");
2882 return;
2884 if (owner) {
2885 mtree_expand_owner("owner", owner);
2887 if (parent && parent != owner) {
2888 mtree_expand_owner("parent", parent);
2892 static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
2893 hwaddr base,
2894 MemoryRegionListHead *alias_print_queue,
2895 bool owner)
2897 MemoryRegionList *new_ml, *ml, *next_ml;
2898 MemoryRegionListHead submr_print_queue;
2899 const MemoryRegion *submr;
2900 unsigned int i;
2901 hwaddr cur_start, cur_end;
2903 if (!mr) {
2904 return;
2907 for (i = 0; i < level; i++) {
2908 qemu_printf(MTREE_INDENT);
2911 cur_start = base + mr->addr;
2912 cur_end = cur_start + MR_SIZE(mr->size);
2914 /*
2915 * Try to detect overflow of the memory region's address range. This
2916 * should never happen normally; when it does, print a marker to warn
2917 * whoever is inspecting the tree.
2918 */
2919 if (cur_start < base || cur_end < cur_start) {
2920 qemu_printf("[DETECTED OVERFLOW!] ");
2923 if (mr->alias) {
2924 MemoryRegionList *ml;
2925 bool found = false;
2927 /* check if the alias is already in the queue */
2928 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
2929 if (ml->mr == mr->alias) {
2930 found = true;
2934 if (!found) {
2935 ml = g_new(MemoryRegionList, 1);
2936 ml->mr = mr->alias;
2937 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
2939 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2940 " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
2941 "-" TARGET_FMT_plx "%s",
2942 cur_start, cur_end,
2943 mr->priority,
2944 mr->nonvolatile ? "nv-" : "",
2945 memory_region_type((MemoryRegion *)mr),
2946 memory_region_name(mr),
2947 memory_region_name(mr->alias),
2948 mr->alias_offset,
2949 mr->alias_offset + MR_SIZE(mr->size),
2950 mr->enabled ? "" : " [disabled]");
2951 if (owner) {
2952 mtree_print_mr_owner(mr);
2954 } else {
2955 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2956 " (prio %d, %s%s): %s%s",
2957 cur_start, cur_end,
2958 mr->priority,
2959 mr->nonvolatile ? "nv-" : "",
2960 memory_region_type((MemoryRegion *)mr),
2961 memory_region_name(mr),
2962 mr->enabled ? "" : " [disabled]");
2963 if (owner) {
2964 mtree_print_mr_owner(mr);
2967 qemu_printf("\n");
2969 QTAILQ_INIT(&submr_print_queue);
2971 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
2972 new_ml = g_new(MemoryRegionList, 1);
2973 new_ml->mr = submr;
2974 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2975 if (new_ml->mr->addr < ml->mr->addr ||
2976 (new_ml->mr->addr == ml->mr->addr &&
2977 new_ml->mr->priority > ml->mr->priority)) {
2978 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
2979 new_ml = NULL;
2980 break;
2983 if (new_ml) {
2984 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
2988 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2989 mtree_print_mr(ml->mr, level + 1, cur_start,
2990 alias_print_queue, owner);
2993 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
2994 g_free(ml);
2998 struct FlatViewInfo {
2999 int counter;
3000 bool dispatch_tree;
3001 bool owner;
3004 static void mtree_print_flatview(gpointer key, gpointer value,
3005 gpointer user_data)
3007 FlatView *view = key;
3008 GArray *fv_address_spaces = value;
3009 struct FlatViewInfo *fvi = user_data;
3010 FlatRange *range = &view->ranges[0];
3011 MemoryRegion *mr;
3012 int n = view->nr;
3013 int i;
3014 AddressSpace *as;
3016 qemu_printf("FlatView #%d\n", fvi->counter);
3017 ++fvi->counter;
3019 for (i = 0; i < fv_address_spaces->len; ++i) {
3020 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3021 qemu_printf(" AS \"%s\", root: %s",
3022 as->name, memory_region_name(as->root));
3023 if (as->root->alias) {
3024 qemu_printf(", alias %s", memory_region_name(as->root->alias));
3026 qemu_printf("\n");
3029 qemu_printf(" Root memory region: %s\n",
3030 view->root ? memory_region_name(view->root) : "(none)");
3032 if (n <= 0) {
3033 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
3034 return;
3037 while (n--) {
3038 mr = range->mr;
3039 if (range->offset_in_region) {
3040 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3041 " (prio %d, %s%s): %s @" TARGET_FMT_plx,
3042 int128_get64(range->addr.start),
3043 int128_get64(range->addr.start)
3044 + MR_SIZE(range->addr.size),
3045 mr->priority,
3046 range->nonvolatile ? "nv-" : "",
3047 range->readonly ? "rom" : memory_region_type(mr),
3048 memory_region_name(mr),
3049 range->offset_in_region);
3050 } else {
3051 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3052 " (prio %d, %s%s): %s",
3053 int128_get64(range->addr.start),
3054 int128_get64(range->addr.start)
3055 + MR_SIZE(range->addr.size),
3056 mr->priority,
3057 range->nonvolatile ? "nv-" : "",
3058 range->readonly ? "rom" : memory_region_type(mr),
3059 memory_region_name(mr));
3061 if (fvi->owner) {
3062 mtree_print_mr_owner(mr);
3064 qemu_printf("\n");
3065 range++;
3068 #if !defined(CONFIG_USER_ONLY)
3069 if (fvi->dispatch_tree && view->root) {
3070 mtree_print_dispatch(view->dispatch, view->root);
3072 #endif
3074 qemu_printf("\n");
3077 static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3078 gpointer user_data)
3080 FlatView *view = key;
3081 GArray *fv_address_spaces = value;
3083 g_array_unref(fv_address_spaces);
3084 flatview_unref(view);
3086 return true;
3089 void mtree_info(bool flatview, bool dispatch_tree, bool owner)
3091 MemoryRegionListHead ml_head;
3092 MemoryRegionList *ml, *ml2;
3093 AddressSpace *as;
3095 if (flatview) {
3096 FlatView *view;
3097 struct FlatViewInfo fvi = {
3098 .counter = 0,
3099 .dispatch_tree = dispatch_tree,
3100 .owner = owner,
3102 GArray *fv_address_spaces;
3103 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3105 /* Gather all FVs in one table */
3106 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3107 view = address_space_get_flatview(as);
3109 fv_address_spaces = g_hash_table_lookup(views, view);
3110 if (!fv_address_spaces) {
3111 fv_address_spaces = g_array_new(false, false, sizeof(as));
3112 g_hash_table_insert(views, view, fv_address_spaces);
3115 g_array_append_val(fv_address_spaces, as);
3118 /* Print */
3119 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3121 /* Free */
3122 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3123 g_hash_table_unref(views);
3125 return;
3128 QTAILQ_INIT(&ml_head);
3130 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3131 qemu_printf("address-space: %s\n", as->name);
3132 mtree_print_mr(as->root, 1, 0, &ml_head, owner);
3133 qemu_printf("\n");
3136 /* print aliased regions */
3137 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3138 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
3139 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
3140 qemu_printf("\n");
3143 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3144 g_free(ml);
3145 }
3146 }
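/*
 * Editorial note: this function backs the HMP "info mtree" command; as
 * of this version of QEMU, the flatview, dispatch_tree and owner
 * parameters correspond to the monitor's -f, -d and -o flags.
 */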
3148 void memory_region_init_ram(MemoryRegion *mr,
3149 struct Object *owner,
3150 const char *name,
3151 uint64_t size,
3152 Error **errp)
3154 DeviceState *owner_dev;
3155 Error *err = NULL;
3157 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3158 if (err) {
3159 error_propagate(errp, err);
3160 return;
3162 /* This will assert if owner is neither NULL nor a DeviceState.
3163 * We only want the owner here for the purposes of defining a
3164 * unique name for migration. TODO: Ideally we should implement
3165 * a naming scheme for Objects which are not DeviceStates, in
3166 * which case we can relax this restriction.
3167 */
3168 owner_dev = DEVICE(owner);
3169 vmstate_register_ram(mr, owner_dev);
3170 }
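/*
 * Editorial sketch (not part of the original file): a typical call from
 * a device realize function; passing the device as owner gives the RAM
 * block a unique, migration-stable vmstate name. The region name and
 * size are hypothetical.
 */
static void G_GNUC_UNUSED example_realize_ram(DeviceState *dev,
                                              MemoryRegion *sram)
{
    memory_region_init_ram(sram, OBJECT(dev), "example.sram",
                           64 * 1024, &error_fatal);
}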
3172 void memory_region_init_rom(MemoryRegion *mr,
3173 struct Object *owner,
3174 const char *name,
3175 uint64_t size,
3176 Error **errp)
3178 DeviceState *owner_dev;
3179 Error *err = NULL;
3181 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3182 if (err) {
3183 error_propagate(errp, err);
3184 return;
3186 /* This will assert if owner is neither NULL nor a DeviceState.
3187 * We only want the owner here for the purposes of defining a
3188 * unique name for migration. TODO: Ideally we should implement
3189 * a naming scheme for Objects which are not DeviceStates, in
3190 * which case we can relax this restriction.
3191 */
3192 owner_dev = DEVICE(owner);
3193 vmstate_register_ram(mr, owner_dev);
3196 void memory_region_init_rom_device(MemoryRegion *mr,
3197 struct Object *owner,
3198 const MemoryRegionOps *ops,
3199 void *opaque,
3200 const char *name,
3201 uint64_t size,
3202 Error **errp)
3204 DeviceState *owner_dev;
3205 Error *err = NULL;
3207 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3208 name, size, &err);
3209 if (err) {
3210 error_propagate(errp, err);
3211 return;
3213 /* This will assert if owner is neither NULL nor a DeviceState.
3214 * We only want the owner here for the purposes of defining a
3215 * unique name for migration. TODO: Ideally we should implement
3216 * a naming scheme for Objects which are not DeviceStates, in
3217 * which case we can relax this restriction.
3218 */
3219 owner_dev = DEVICE(owner);
3220 vmstate_register_ram(mr, owner_dev);
3223 static const TypeInfo memory_region_info = {
3224 .parent = TYPE_OBJECT,
3225 .name = TYPE_MEMORY_REGION,
3226 .instance_size = sizeof(MemoryRegion),
3227 .instance_init = memory_region_initfn,
3228 .instance_finalize = memory_region_finalize,
3231 static const TypeInfo iommu_memory_region_info = {
3232 .parent = TYPE_MEMORY_REGION,
3233 .name = TYPE_IOMMU_MEMORY_REGION,
3234 .class_size = sizeof(IOMMUMemoryRegionClass),
3235 .instance_size = sizeof(IOMMUMemoryRegion),
3236 .instance_init = iommu_memory_region_initfn,
3237 .abstract = true,
3240 static void memory_register_types(void)
3242 type_register_static(&memory_region_info);
3243 type_register_static(&iommu_memory_region_info);
3244 }
3246 type_init(memory_register_types)