/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
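
/*
 * Illustrative sketch (not part of the original file): two FlatRanges are
 * merged by can_merge()/flatview_simplify() only when they are contiguous
 * both in the address space and within the backing region, e.g.
 *
 *     [0x1000, +0x1000) -> mr "ram", offset 0x0
 *     [0x2000, +0x1000) -> mr "ram", offset 0x1000
 *
 * collapse into a single range [0x1000, +0x2000), while a gap, a different
 * region, or a change in dirty-log mask, ROM/device mode or read-only flag
 * keeps them separate.
 */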

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
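
/*
 * Worked example (sketch, not from the original file): with
 * TARGET_WORDS_BIGENDIAN defined and a DEVICE_LITTLE_ENDIAN region,
 * memory_region_wrong_endianness() is true, so a 4-byte value 0x11223344
 * coming from the device is byte-swapped to 0x44332211 before being handed
 * on, and symmetrically for writes.
 */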

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access)(MemoryRegion *mr,
                                                                   hwaddr addr,
                                                                   uint64_t *value,
                                                                   unsigned size,
                                                                   unsigned shift,
                                                                   uint64_t mask,
                                                                   MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
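
/*
 * Worked example (sketch): an 8-byte access to a region whose
 * impl.max_access_size is 4 is split into two 4-byte accesses with
 * access_mask 0xffffffff.  On a big-endian target the first access covers
 * bits 63..32 (shift (8-4-0)*8 = 32) and the second bits 31..0 (shift 0);
 * on a little-endian target the shifts are 0 and then 32.  The partial
 * results are OR-ed into *value and the MemTxResults are OR-ed into r.
 */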

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}
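
/*
 * Sketch of the flattening performed above (hypothetical layout): given a
 * root container with RAM covering [0x0, 0x8000000) and a higher-priority
 * MMIO subregion at [0x1000, 0x2000), render_memory_region() emits three
 * disjoint FlatRanges -- RAM below the MMIO hole, the MMIO range itself,
 * and RAM above it -- and flatview_simplify() then merges whatever ended
 * up adjacent and compatible.
 */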

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}
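
/*
 * Usage sketch (not from this file): callers batch several topology changes
 * so that listeners see a single begin/commit cycle and the flat views are
 * rebuilt once.  "bank0"/"bank1" are hypothetical regions:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, 0x40000000);
 *     memory_region_transaction_commit();
 *
 * Transactions nest; only the outermost commit triggers the update.
 */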

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
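
/*
 * Example (sketch): QOM path separators, brackets and backslashes are
 * escaped as "\xNN", so memory_region_escape_name("pci/mem[0]") yields
 * "pci\x2fmem\x5b0\x5d", which is safe to use as a child property name.
 */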

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
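
/*
 * Usage sketch (hypothetical device, not part of this file): an MMIO region
 * is created over a MemoryRegionOps table and mapped into a container.
 * foo_mmio_read/foo_mmio_write and the addresses are assumptions for the
 * example:
 *
 *     static const MemoryRegionOps foo_ops = {
 *         .read = foo_mmio_read,
 *         .write = foo_mmio_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &foo_ops, s,
 *                           "foo-mmio", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000, &s->mmio);
 */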

void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
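
/*
 * Usage sketch (assumed names): a board typically allocates guest RAM and
 * maps it at a fixed base:
 *
 *     memory_region_init_ram(ram, NULL, "mach.ram", ram_size, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */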

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_set_skip_dump(MemoryRegion *mr)
{
    mr->skip_dump = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
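
/*
 * Sketch (names hypothetical): an alias re-exposes a slice of another
 * region at a new address, e.g. mirroring the first megabyte of RAM:
 *
 *     memory_region_init_alias(alias, NULL, "ram-below-1m", ram,
 *                              0, 0x100000);
 *     memory_region_add_subregion(get_system_memory(), 0xfff00000, alias);
 *
 * Aliases have no storage of their own; render_memory_region() above
 * redirects them to the original region with alias_offset applied.
 */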

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops,
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&mr->iommu_notify);
    mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_skip_dump(MemoryRegion *mr)
{
    return mr->skip_dump;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;

    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != mr->iommu_notify_flags &&
        mr->iommu_ops->notify_flag_changed) {
        mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags,
                                           flags);
    }

    mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    /* We need to register for at least one bitfield */
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(mr);
}
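
/*
 * Usage sketch (assumed caller, e.g. a vhost/VFIO-style client): the
 * notifier's fields are filled in before registration; the flags select
 * MAP and/or UNMAP events.  my_iommu_notify is a placeholder for the
 * caller's IOMMUTLBEntry handler:
 *
 *     IOMMUNotifier n = {
 *         .notify = my_iommu_notify,
 *         .notifier_flags = IOMMU_NOTIFIER_ALL,
 *     };
 *     memory_region_register_iommu_notifier(iommu_mr, &n);
 */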

uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
{
    assert(memory_region_is_iommu(mr));
    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
        return mr->iommu_ops->get_min_page_size(mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
                                bool is_write)
{
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    granularity = memory_region_iommu_get_min_page_size(mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    QLIST_REMOVE(n, node);
    memory_region_update_iommu_notify_flags(mr);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;
    IOMMUNotifierFlag request_flags;

    assert(memory_region_is_iommu(mr));

    if (entry.perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) {
        if (iommu_notifier->notifier_flags & request_flags) {
            iommu_notifier->notify(iommu_notifier, &entry);
        }
    }
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MemoryRegionSection mrs = section_from_flat_range(fr, as);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void memory_region_set_fd(MemoryRegion *mr, int fd)
{
    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    mr->ram_block->fd = fd;
    rcu_read_unlock();
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
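
/*
 * Usage sketch (virtio-style doorbell, names assumed): a write of the value
 * 1 to offset 0x40 of the region signals an EventNotifier instead of
 * trapping into the device model:
 *
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(&s->mmio, 0x40, 4, true, 1, &notifier);
 *
 * With match_data=false the eventfd fires on any value written to the range.
 */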

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
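
/*
 * Sketch: overlap priorities decide what the guest sees where regions
 * intersect.  With a RAM subregion at the default priority 0 and an MMIO
 * window added via memory_region_add_subregion_overlap(..., 1), the MMIO
 * window shadows the RAM in the overlapping range; disabling the window
 * uncovers the RAM again.
 */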

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
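
/*
 * Usage sketch: memory_region_find() returns a referenced section, so the
 * caller must drop the reference when done (container and address assumed):
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, 0x10000000, 4);
 *     if (sec.mr) {
 *         // ... use sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */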
2162 bool memory_region_present(MemoryRegion *container, hwaddr addr)
2164 MemoryRegion *mr;
2166 rcu_read_lock();
2167 mr = memory_region_find_rcu(container, addr, 1).mr;
2168 rcu_read_unlock();
2169 return mr && mr != container;
void memory_global_dirty_log_sync(void)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask) {
                MemoryRegionSection mrs = section_from_flat_range(fr, as);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

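/*
 * Each listener is synced only against the address space it registered
 * with, and only flat ranges with a non-zero dirty_log_mask are visited,
 * so regions without dirty tracking are skipped entirely.
 */
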
void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

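/*
 * Start and stop are deliberately mirrored: start notifies listeners
 * Forward before refreshing the dirty-log state, stop refreshes first and
 * then notifies in Reverse, tearing listeners down in the opposite order
 * they were brought up.
 */
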
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

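/*
 * Replaying the current flat view here means a listener registered late
 * observes the same begin/log_start/region_add/commit sequence it would
 * have seen had it been registered before the regions were mapped.
 */
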
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

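/*
 * Registration sketch (field values are illustrative, not from this file):
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .priority = 10,
 *     };
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * Both the global list and the per-address-space list are kept sorted by
 * ascending priority, so higher-priority listeners are called last on
 * Forward walks and first on Reverse walks.
 */
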
void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}

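/*
 * The surrounding transaction ensures the new address space's first flat
 * view is rendered (and listeners notified) in one commit rather than
 * piecemeal; a NULL name falls back to "anonymous".
 */
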
static void do_address_space_destroy(AddressSpace *as)
{
    bool do_free = as->malloced;

    address_space_destroy_dispatch(as);
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}

AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}

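/*
 * Sharing semantics: a second caller passing the same root gets the
 * existing AddressSpace back with ref_count bumped. Only malloced address
 * spaces participate in the lookup, so statically embedded ones are never
 * handed out here.
 */
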
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_unregister(as);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use. Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

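/*
 * as->root is NULLed and committed so listeners flush their mappings, then
 * restored before the RCU-deferred free so that do_address_space_destroy()
 * can still drop its reference on the root.
 */
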
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                       : '-',
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

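/*
 * Subregions print in ascending address order, higher priority first on an
 * address tie, which mirrors how overlaps are resolved when the flat view
 * is rendered.
 */
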
void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}

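/*
 * Sample "info mtree" output (illustrative and shortened; exact regions
 * and names depend on the machine):
 *
 *     address-space: memory
 *       0000000000000000-ffffffffffffffff (prio 0, RW): system
 *         0000000000000000-0000000007ffffff (prio 0, RW): alias ram-below-4g
 *             @pc.ram 0000000000000000-0000000007ffffff
 */
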
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)