/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h" /* qemu_sprint_backtrace */
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};
static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
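
/*
 * Illustrative example (editor's addition, not part of the original file):
 * clipping one range against another with the helpers above.
 * [0x1000, 0x3000) intersected with [0x2000, 0x4000) yields
 * start = max(starts) = 0x2000 and end = min(ends) = 0x3000, i.e. a range
 * of size 0x1000:
 *
 *     AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x2000));
 *     AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x2000));
 *     AddrRange c = addrrange_intersection(a, b);
 *     // c.start == 0x2000, c.size == 0x1000
 */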
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
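
/*
 * Illustrative example (editor's addition, not from the original file): two
 * FlatRanges covering [0x0, 0x1000) and [0x1000, 0x2000) of the same
 * MemoryRegion, with contiguous offset_in_region values and identical
 * attributes, satisfy can_merge() and are collapsed by flatview_simplify()
 * into a single [0x0, 0x2000) range.  Any attribute mismatch (readonly,
 * dirty_log_mask, romd_mode, nonvolatile) keeps them separate.
 */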
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}
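
/*
 * Illustrative example (editor's addition, not from the original file):
 * assembling a 4-byte little-endian read out of two 2-byte device accesses.
 * With mask = 0xffff, the callback result for offset 0 is merged at shift 0
 * and the result for offset 2 at shift 16:
 *
 *     uint64_t value = 0;
 *     memory_region_shift_read_access(&value, 0,  0xffff, 0x1234);
 *     memory_region_shift_read_access(&value, 16, 0xffff, 0x5678);
 *     // value == 0x56781234
 */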
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
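
/*
 * Illustrative example (editor's addition, not from the original file): a
 * guest performs an 8-byte read on a device whose .impl.max_access_size is
 * 4.  access_size becomes 4, access_mask becomes 0xffffffff, and the loop
 * issues two callbacks.  For a little-endian region, offset 0 lands at
 * shift 0 and offset 4 at shift 32; for a big-endian region the shifts are
 * reversed, so the lowest-address chunk fills the most significant half.
 */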
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way.  This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
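
/*
 * Illustrative usage (editor's addition, not from the original file):
 * batching several layout changes so listeners see a single update.  Each
 * nested begin increments memory_region_transaction_depth; only the commit
 * that brings it back to zero rebuilds the FlatViews:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);
 *     memory_region_set_enabled(mr_b, true);
 *     memory_region_transaction_commit();   // one topology update
 */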
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
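
/*
 * Illustrative example (editor's addition, not from the original file):
 * characters that are special in QOM paths are rewritten as a four-byte
 * "\xNN" escape, so a region named "pci/bar[0]" becomes
 * "pci\x2fbar\x5b0\x5d" before it is registered as a child property of its
 * owner.
 */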
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}
static int qemu_target_backtrace(target_ulong *array, size_t size)
{
    int n = 0;
    if (size >= 2) {
#if defined(TARGET_ARM)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->regs[15];
        array[1] = env->regs[14];
#elif defined(TARGET_MIPS)
        CPUArchState *env = current_cpu->env_ptr;
        array[0] = env->active_tc.PC;
        array[1] = env->active_tc.gpr[31];
#else
        array[0] = 0;
        array[1] = 0;
#endif
        n = 2;
    }
    return n;
}

#include "disas/disas.h"
const char *qemu_sprint_backtrace(char *buffer, size_t length)
{
    char *p = buffer;
    if (current_cpu) {
        target_ulong caller[2];
        const char *symbol;
        qemu_target_backtrace(caller, 2);
        symbol = lookup_symbol(caller[0]);
        p += sprintf(p, "[%s]", symbol);
        symbol = lookup_symbol(caller[1]);
        p += sprintf(p, "[%s]", symbol);
    } else {
        p += sprintf(p, "[cpu not running]");
    }
    assert((p - buffer) < length);
    return buffer;
}
static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem read " TARGET_FMT_plx " %s\n",
                addr, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    //~ vm_stop(0);
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    if (trace_unassigned) {
        char buffer[256];
        fprintf(stderr, "Unassigned mem write " TARGET_FMT_plx
                " = 0x%" PRIx64 " %s\n",
                addr, val, qemu_sprint_backtrace(buffer, sizeof(buffer)));
    }
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        fprintf(stderr, "Misaligned i/o to address %08" HWADDR_PRIx
                " with size %u for memory region %s\n",
                addr, size, mr->name);
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
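
/*
 * Illustrative usage (editor's addition, not from the original file; the
 * "mydev" names are hypothetical): a device supplies MemoryRegionOps and
 * lets the dispatch code above split and byte-swap accesses as needed:
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 */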
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
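
/*
 * Illustrative usage (editor's addition, not from the original file;
 * "isa_low", "system_ram" and "system_memory" are placeholder names):
 * exposing the low 1 MiB of a RAM region a second time, as x86 machines do
 * for the ISA hole.  The alias has no storage of its own;
 * render_memory_region() above follows mr->alias and mr->alias_offset back
 * to the original region:
 *
 *     memory_region_init_alias(&isa_low, NULL, "isa-low-alias",
 *                              system_ram, 0, 0x100000);
 *     memory_region_add_subregion(system_memory, 0x0, &isa_low);
 */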
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_one(iommu_notifier, &entry);
        }
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .fv = view,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_io_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_io_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}
void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}
void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}
void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;
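/*
 * The ioeventfds array is kept sorted (memory_region_ioeventfd_before
 * defines the order), so insertion locates the slot, grows the array,
 * and shifts the tail up by one.  When KVM cannot bind the eventfd to
 * MMIO directly, writes are still matched in userspace dispatch, which
 * works but is slower; warn once in that case.
 */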
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i + 1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - 1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i + 1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i + 1)));
    --mr->ioeventfd_nb;
    /* The extra byte keeps the allocation nonzero (and the pointer
     * valid) when the array becomes empty. */
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
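/*
 * A container keeps its subregions sorted by descending priority, so
 * higher-priority regions are considered first when rendering overlaps.
 * Insertion places the new subregion before the first existing entry
 * whose priority does not exceed it.
 */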
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
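/*
 * Usage sketch (hypothetical device code, not part of this file): map
 * an MMIO window on top of overlapping system RAM, letting the window
 * win by virtue of its higher priority:
 *
 *     memory_region_add_subregion(sysmem, 0, machine_ram);
 *     memory_region_add_subregion_overlap(sysmem, 0xfee00000,
 *                                         &dev->mmio, 1);
 */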
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
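/*
 * A size of UINT64_MAX is interpreted as 2^64 bytes (the whole 64-bit
 * address space), which a plain uint64_t cannot represent; Int128
 * arithmetic carries the extra bit.
 */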
void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}
void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
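/*
 * bsearch() comparator for flat ranges: any overlap between the probe
 * range and a flat range counts as a match, so the search lands on
 * some intersecting range, not necessarily the first one.
 */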
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}
bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    /* bsearch may land in the middle of a run of intersecting ranges;
     * back up to the first one. */
    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;

    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}
void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}
static VMChangeStateEntry *vmstate_change;
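/*
 * Stopping dirty logging while the VM is stopped is deferred: a VM
 * change state handler performs the actual stop once the VM is running
 * again.  Starting dirty logging cancels any pending deferred stop.
 */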
void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}
void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
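/*
 * Replay the current memory topology to a newly registered listener:
 * region_add for every flat range of the address space, plus log_start
 * where dirty logging is already active, bracketed by begin/commit.
 */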
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
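/*
 * Listeners live on two sorted lists: the global memory_listeners list
 * and the per-address-space list.  Both are kept in ascending priority
 * order, so Forward dispatch runs low-priority listeners first and
 * Reverse dispatch runs them last.
 */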
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}
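/*
 * Typical use (a sketch with hypothetical names, not part of this
 * file): give a device its own DMA view of memory:
 *
 *     memory_region_init(&s->root, OBJECT(s), "dma-root", UINT64_MAX);
 *     address_space_init(&s->as, &s->root, "my-device-dma");
 *     ...
 *     address_space_destroy(&s->as);
 */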
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
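/* MR_SIZE() yields the offset of a region's last byte (size - 1), so
 * the ranges printed below are inclusive; a zero-sized region prints
 * as 0. */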
#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "
static void mtree_expand_owner(fprintf_function mon_printf, void *f,
                               const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    mon_printf(f, " %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        mon_printf(f, " id=%s", dev->id);
    } else {
        gchar *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            mon_printf(f, " path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            mon_printf(f, " type=%s", object_get_typename(obj));
        }
    }
    mon_printf(f, "}");
}
static void mtree_print_mr_owner(fprintf_function mon_printf, void *f,
                                 const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        mon_printf(f, " orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner(mon_printf, f, "owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner(mon_printf, f, "parent", parent);
    }
}
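/*
 * Recursively print a memory region tree.  Aliases are queued on
 * @alias_print_queue so each alias target is printed exactly once,
 * after all address spaces have been walked.
 */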
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s",
                   cur_start, cur_end,
                   mr->priority,
                   mr->nonvolatile ? "nv-" : "",
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mon_printf, f, mr);
        }
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s%s): %s%s",
                   cur_start, cur_end,
                   mr->priority,
                   mr->nonvolatile ? "nv-" : "",
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mon_printf, f, mr);
        }
    }
    mon_printf(f, "\n");

    QTAILQ_INIT(&submr_print_queue);

    /* Print children sorted by address, ties broken by descending
     * priority. */
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue, owner);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}
struct FlatViewInfo {
    fprintf_function mon_printf;
    void *f;
    int counter;
    bool dispatch_tree;
    bool owner;
};
static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    fprintf_function p = fvi->mon_printf;
    void *f = fvi->f;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    p(f, "FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
        if (as->root->alias) {
            p(f, ", alias %s", memory_region_name(as->root->alias));
        }
        p(f, "\n");
    }

    p(f, " Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s%s): %s @" TARGET_FMT_plx,
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->nonvolatile ? "nv-" : "",
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s%s): %s",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->nonvolatile ? "nv-" : "",
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(p, f, mr);
        }
        p(f, "\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(p, f, view->dispatch, view->root);
    }
#endif

    p(f, "\n");
}
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}
void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
                bool dispatch_tree, bool owner)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .mon_printf = mon_printf,
            .f = f,
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head, owner);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head, owner);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
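/*
 * The _init_ram/_init_rom/_init_rom_device variants below differ from
 * their _nomigrate counterparts only in registering the RAM block for
 * migration, which is why they insist on a DeviceState (or NULL) owner.
 */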
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration.  TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)