/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
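
/* Strict weak ordering for MemoryRegionIoeventfd: compare by start address,
 * then size, then match_data, then (when both match on data) the data value,
 * and finally the notifier pointer.  This is the total order used to keep
 * the per-region and per-address-space ioeventfd arrays sorted.
 */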
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for
 * maintaining sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}

AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}
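
/* Two adjacent FlatRanges can be merged only if they are contiguous both in
 * guest physical address space and in their offsets into the same
 * MemoryRegion, and carry identical dirty-logging, ROM-device and read-only
 * attributes.
 */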
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
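
/* A device is "big endian" here if its registers need no byte swapping for
 * the current target; adjust_endianness() below swaps the data whenever the
 * device and target byte orders disagree.
 */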
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
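
/* Each accessor below performs one device callback invocation for a
 * sub-access of a (possibly wider) value: it reads or writes @size bytes at
 * @addr and merges the result into, or extracts its input from, *value
 * using @shift and @mask.  access_with_adjusted_size() drives them.
 */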
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
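
/* Split an access of @size bytes into pieces the device accepts: the chunk
 * size is @size clamped to [access_size_min, access_size_max], and the
 * chunks are issued in ascending or descending significance so that *value
 * is assembled correctly for the device's endianness.
 */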
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the
                         * only enabled one, use it in the hope of finding an
                         * alias down the way.  This will also let us share
                         * FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}
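
/* Rebuild the flat list of ioeventfds visible in @as: every region-relative
 * ioeventfd range is shifted to its absolute address, clipped against its
 * FlatRange, and the resulting list is diffed against the previous one via
 * address_space_add_del_ioeventfds().
 */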
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
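
/* flat_views caches one FlatView per flatview root region, so that address
 * spaces whose roots flatten identically can share a view; the NULL key
 * maps to a permanently referenced empty view.
 */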
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}

static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}
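
/* Transactions nest by depth-counting: begin() only bumps the counter, and
 * the outermost commit() regenerates the FlatViews and/or ioeventfd lists
 * once, if anything was flagged pending while the transaction was open.
 */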
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}
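
/* Region names become QOM path components, so the characters that are
 * special in QOM paths ('/', '[', ']', '\\') are escaped as "\xNN" using
 * the character's hex value.
 */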
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}
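
/* Dispatch a read to whichever callback the region's ops provide: .read,
 * .read_with_attrs, or the legacy old_mmio table, in that order of
 * preference.
 */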
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
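
/* Writes first consult the region's ioeventfds: when KVM is not handling
 * eventfd bindings itself, a matching write just signals the notifier and
 * never reaches the device callbacks below.
 */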
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask + 1 ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        memory_region_notify_one(iommu_notifier, &entry);
    }
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}
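
/* Resolve the host pointer for a RAM region, following the alias chain and
 * accumulating alias offsets along the way; the walk happens under the RCU
 * read lock so the ram_block stays valid.
 */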
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .fv = view,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
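
/*
 * Usage sketch (illustrative comment only): wiring a doorbell register to
 * an EventNotifier so a guest write wakes another thread without entering
 * this device model.  "s->notifier", "s->mmio" and the 0x40 doorbell
 * offset are hypothetical.
 *
 *     event_notifier_init(&s->notifier, 0);
 *     memory_region_add_eventfd(&s->mmio, 0x40, 4,
 *                               false, 0, &s->notifier);
 *
 * With match_data == false the data argument is ignored and any 4-byte
 * write to the offset fires the notifier; pass match_data == true to
 * trigger only when one specific value is written.
 */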
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
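
/*
 * Usage sketch (illustrative comment only): composing a board-level
 * layout.  "sysmem", "ram" and "bios" are hypothetical regions provided
 * by the caller.
 *
 *     memory_region_add_subregion(sysmem, 0, ram);
 *     // Let a BIOS window shadow the RAM beneath it:
 *     memory_region_add_subregion_overlap(sysmem, 0xfffc0000, bios, 1);
 *
 * Plain add_subregion uses priority 0 and is meant for non-overlapping
 * siblings; the _overlap variant makes intentional overlap explicit, and
 * the higher priority wins where ranges collide.
 */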
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}
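
/*
 * Usage sketch (illustrative comment only): PCI-style BAR remapping.  A
 * guest write to a hypothetical base-address register simply moves the
 * already-mapped region; "MyDeviceState" and "bar_mem" are placeholders.
 *
 *     static void mydev_update_bar(MyDeviceState *s, hwaddr new_base)
 *     {
 *         memory_region_set_address(&s->bar_mem, new_base);
 *     }
 *
 * Because readd_subregion above wraps the del/add pair in a transaction,
 * listeners observe a single topology update rather than a transient
 * unmapped hole.
 */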
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region. It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
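
/*
 * Usage sketch (illustrative comment only): translating a range below a
 * region into whatever is mapped there.  "root" and "addr" are
 * placeholders for a caller-provided region and guest physical address.
 * A non-NULL result carries a reference taken by memory_region_find(),
 * which the caller must drop.
 *
 *     MemoryRegionSection sec = memory_region_find(root, addr, 4);
 *
 *     if (sec.mr) {
 *         // use sec.offset_within_region, sec.size, ...
 *         memory_region_unref(sec.mr);
 *     }
 */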
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);

                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}
static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
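
/*
 * Usage sketch (illustrative comment only): the dirty-log lifecycle as a
 * migration-like client would drive it.
 *
 *     memory_global_dirty_log_start();
 *     ...
 *     memory_global_dirty_log_sync();   // pull dirty bits each iteration
 *     ...
 *     memory_global_dirty_log_stop();
 *
 * As implemented above, a stop issued while the VM is not running is not
 * performed immediately: a vm-change-state handler defers the actual
 * teardown until the next transition back to a running state (or a later
 * log_start cancels it).
 */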
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .fv = view,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
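
/*
 * Usage sketch (illustrative comment only): a minimal listener that
 * observes every mapped section of an address space.  "my_region_add" is
 * a hypothetical callback; unset callbacks are simply skipped.
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         // inspect s->offset_within_address_space, s->size, s->mr ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * Registration immediately replays the current topology through
 * region_add (see listener_add_address_space above), so the listener
 * need not special-case pre-existing mappings.
 */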
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}
bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr callback calls
     * memory_region_invalidate_mmio_ptr, which is likely when we use
     * a cache.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}
typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;
    hwaddr offset;
    unsigned size;
    int busy;
    int allocated;
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    if (section.mr != mr) {
        /* memory_region_find adds a ref on section.mr */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface; just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}
void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    size_t i;
    MMIOPtrInvalidate *invalidate_data = NULL;

    for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
        if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
            invalidate_data = &mmio_ptr_invalidate_list[i];
            break;
        }
    }

    if (!invalidate_data) {
        invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
        invalidate_data->allocated = 1;
    }

    invalidate_data->mr = mr;
    invalidate_data->offset = offset;
    invalidate_data->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(invalidate_data));
}
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use. Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
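
/*
 * Usage sketch (illustrative comment only): a per-device DMA view, as a
 * device without an IOMMU might create one.  "s->as" and "s->root" are
 * hypothetical fields.
 *
 *     address_space_init(&s->as, &s->root, "mydev-dma");
 *     ...
 *     address_space_destroy(&s->as);   // final teardown happens via RCU
 *
 * Readers inside rcu_read_lock() may still see the old FlatView briefly
 * after destroy returns; that is why do_address_space_destroy() runs
 * from call_rcu() above rather than synchronously.
 */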
static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of the memory region's bounds.  This should
     * never happen normally; when it does, print a marker to warn whoever
     * is reading the tree.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}
struct FlatViewInfo {
    fprintf_function mon_printf;
    void *f;
    int counter;
    bool dispatch_tree;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    fprintf_function p = fvi->mon_printf;
    void *f = fvi->f;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    p(f, "FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
        if (as->root->alias) {
            p(f, ", alias %s", memory_region_name(as->root->alias));
        }
        p(f, "\n");
    }

    p(f, " Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(p, f, view->dispatch, view->root);
    }
#endif

    p(f, "\n");
}
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}
void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
                bool dispatch_tree)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .mon_printf = mon_printf,
            .f = f,
            .counter = 0,
            .dispatch_tree = dispatch_tree
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }
            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
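
/*
 * Usage sketch (illustrative comment only): the common pattern in a
 * device realize method.  "MyDeviceState", "MYDEV" and the 0x10000 size
 * are hypothetical; passing the device as owner both names the backing
 * RAMBlock and, in this migration-aware variant, registers the RAM for
 * migration.
 *
 *     static void mydev_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyDeviceState *s = MYDEV(dev);
 *
 *         memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                                0x10000, errp);
 *     }
 *
 * Use the _nomigrate variant when the contents are recreated on the
 * destination and must not be migrated.
 */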
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)