/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
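
/*
 * For example, intersecting [0x1000, +0x2000) with [0x2000, +0x2000)
 * yields [0x2000, +0x1000): the start is the larger of the two starts,
 * the end is the smaller of the two ends.
 */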
enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)
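
/*
 * Example: MEMORY_LISTENER_CALL_GLOBAL(begin, Forward) invokes
 * listener->begin(listener) on every registered listener, front to back
 * (i.e. in ascending priority order); Reverse walks the list back to
 * front, which is used for teardown-style callbacks such as region_del.
 */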
/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {      \
        .mr = (fr)->mr,                                                \
        .address_space = (as),                                         \
        .offset_within_region = (fr)->offset_in_region,                \
        .size = (fr)->addr.size,                                       \
        .offset_within_address_space = int128_get64((fr)->addr.start), \
        .readonly = (fr)->readonly,                                    \
    }), ##_args)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
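
/*
 * For instance, two FlatRanges covering [0x0, +0x1000) and
 * [0x1000, +0x1000) of the same MemoryRegion, with contiguous
 * offset_in_region and identical attributes, collapse into a single
 * [0x0, +0x2000) range.
 */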
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access)(MemoryRegion *mr,
                                                                   hwaddr addr,
                                                                   uint64_t *value,
                                                                   unsigned size,
                                                                   unsigned shift,
                                                                   uint64_t mask,
                                                                   MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
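
/*
 * Example: a 64-bit access to a region whose impl.max_access_size is 4
 * is split into two 4-byte accesses.  On a little-endian target the
 * first covers bits 0..31 (shift 0) and the second bits 32..63
 * (shift 32); big-endian targets reverse the shifts so the most
 * significant bytes come from the lowest address.
 */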
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
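
/*
 * Example: an MMIO subregion added at priority 1 that overlaps a RAM
 * subregion at priority 0 is rendered first, so the RAM region only
 * fills the gaps around it and the guest sees MMIO in the overlap.
 */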
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
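
/*
 * The caller runs the pass above twice: first with adding == false so
 * every listener sees all region_del callbacks before any additions,
 * then with adding == true for region_add/region_nop.  This keeps each
 * listener's view consistent at every intermediate step.
 */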
static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

static void memory_region_clear_pending(void)
{
    memory_region_update_pending = false;
    ioeventfd_update_pending = false;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }

            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
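
/*
 * Example: "pci/mem" escapes to "pci\x2fmem", keeping the QOM path
 * separator '/' (and '[', ']', '\\') out of property names.
 */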
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}
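
/*
 * The "[*]" suffix makes QOM pick the first free index for the child
 * property, so several regions can share a name under one owner
 * ("foo[0]", "foo[1]", ...).
 */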
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
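
/*
 * Typical use (illustrative sketch; "s", my_dev_ops and the 0x1000 size
 * are placeholders, not from this file):
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
 */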
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_set_skip_dump(MemoryRegion *mr)
{
    mr->skip_dump = true;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}
void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    notifier_list_init(&mr->iommu_notify);
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}
uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_skip_dump(MemoryRegion *mr)
{
    return mr->skip_dump;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    if (mr->iommu_ops->notify_started &&
        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
        mr->iommu_ops->notify_started(mr);
    }
    notifier_list_add(&mr->iommu_notify, n);
}

uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr)
{
    assert(memory_region_is_iommu(mr));
    if (mr->iommu_ops && mr->iommu_ops->get_min_page_size) {
        return mr->iommu_ops->get_min_page_size(mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
{
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    granularity = memory_region_iommu_get_min_page_size(mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    notifier_remove(n);
    if (mr->iommu_ops->notify_stopped &&
        QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
        mr->iommu_ops->notify_stopped(mr);
    }
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    assert(memory_region_is_iommu(mr));
    notifier_list_notify(&mr->iommu_notify, &entry);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void memory_region_set_fd(MemoryRegion *mr, int fd)
{
    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    mr->ram_block->fd = fd;
    rcu_read_unlock();
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
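
/*
 * Typical use (illustrative sketch; the names are placeholders, not
 * from this file): a virtio-style device might bind a notifier to a
 * 2-byte write of a queue index at its notify offset:
 *
 *     memory_region_add_eventfd(&proxy->bar, notify_off, 2,
 *                               true, vq_index, &vq_notifier);
 */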
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = atomic_rcu_read(&as->current_map);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.address_space = as;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
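
/*
 * Callers of memory_region_find() must drop the reference taken on
 * ret.mr with memory_region_unref() once they are done with the
 * section; a NULL ret.mr means nothing was found.
 */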
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void address_space_sync_dirty_bitmap(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
    flatview_unref(view);
}
void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh the DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;

    /* Refresh the DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as) {
        return;
    }

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    /* Replay the current flat view so the new listener catches up with
     * regions that were mapped before it registered.
     */
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as,
            .offset_within_region = fr->offset_in_region,
            .size = fr->addr.size,
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
{
    MemoryListener *other = NULL;
    AddressSpace *as;

    listener->address_space_filter = filter;
    /* Keep the list sorted by ascending priority. */
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        listener_add_address_space(listener, as);
    }
}
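/*
 * Registration sketch (illustrative; the listener and callback names are
 * hypothetical).  Because listener_add_address_space() replays the current
 * flat view, region_add fires immediately for already-mapped ranges:
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         ... react to s->offset_within_address_space, s->size ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */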
void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    memory_region_transaction_begin();
    as->ref_count = 1;
    as->root = root;
    as->malloced = false;
    as->current_map = g_new(FlatView, 1);
    flatview_init(as->current_map);
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_init_dispatch(as);
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
}
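/*
 * Lifecycle sketch (illustrative; "s" is a hypothetical device state that
 * embeds its AddressSpace).  init and destroy are expected to pair up:
 *
 *     address_space_init(&s->as, s->root_mr, "mydev-dma");
 *     ... issue accesses through s->as, e.g. via address_space_rw() ...
 *     address_space_destroy(&s->as);
 */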
static void do_address_space_destroy(AddressSpace *as)
{
    MemoryListener *listener;
    bool do_free = as->malloced;

    address_space_destroy_dispatch(as);

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        assert(listener->address_space_filter != as);
    }

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
    if (do_free) {
        g_free(as);
    }
}
AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (root == as->root && as->malloced) {
            as->ref_count++;
            return as;
        }
    }

    as = g_malloc0(sizeof *as);
    address_space_init(as, root, name);
    as->malloced = true;
    return as;
}
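/*
 * Sharing sketch (illustrative): two callers passing the same root get the
 * same malloced AddressSpace back, with ref_count covering both; each
 * caller still calls address_space_destroy() once when it is done:
 *
 *     AddressSpace *a = address_space_init_shareable(root, "cpu-memory");
 *     AddressSpace *b = address_space_init_shareable(root, "cpu-memory");
 *     assert(a == b);    // ref_count is now 2
 */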
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    as->ref_count--;
    if (as->ref_count) {
        return;
    }
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
    address_space_unregister(as);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                      : '-',
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->enabled ? "" : " [disabled]");
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s%s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (int128_nz(mr->size) ?
                      (hwaddr)int128_get64(int128_sub(mr->size,
                                                      int128_one())) : 0),
                   mr->priority,
                   mr->romd_mode ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
                                                                      : '-',
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
    }

    QTAILQ_INIT(&submr_print_queue);

    /* Sort subregions by ascending address, then descending priority. */
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}
void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}
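/*
 * The output follows the mon_printf() formats above; on a 64-bit target a
 * line for a plain RAM region might look like (addresses made up for
 * illustration):
 *
 *     address-space: memory
 *       0000000000000000-000000000009ffff (prio 0, RW): pc.ram
 */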
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
}

type_init(memory_register_types)