memory: Introduce replay_discarded callback for RamDiscardManager
include/exec/memory.h
/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "qemu:ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif
/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

#define GLOBAL_DIRTY_MASK  (0x3)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    hwaddr low;
    hwaddr high;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind memory */
#define RAM_PMEM (1 << 5)

/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: will bail out if not supported. When not
 * set, the OS will do the reservation, if supported for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
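
/*
 * Example (illustrative sketch, not part of the original header): a user of
 * the notifier API might request MAP and UNMAP events over a whole region as
 * follows; "my_iommu_notify" is a hypothetical callback.
 *
 *     static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // An UNMAP is signalled with entry->perm == IOMMU_NONE.
 *     }
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_IOTLB_EVENTS,
 *                         0, HWADDR_MAX, 0);
 *     // then register with memory_region_register_iommu_notifier(), below
 */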
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
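
/*
 * Example (sketch only, hypothetical device): a minimal MemoryRegionOps for
 * a bank of 32-bit registers, restricting guest accesses to aligned 4-byte
 * reads and writes.
 *
 *     static uint64_t my_dev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void my_dev_write(void *opaque, hwaddr addr, uint64_t data,
 *                              unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps my_dev_ops = {
 *         .read = my_dev_read,
 *         .write = my_dev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = { .min_access_size = 4, .max_access_size = 4 },
 *     };
 */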
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @hwaddr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);

    /**
     * @iommu_set_page_size_mask:
     *
     * Restrict the page size mask that can be supported with a given IOMMU
     * memory region. Used for example to propagate host physical IOMMU page
     * size mask limitations to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default global
     * page mask is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @page_size_mask: a bitmask of supported page sizes. At least one bit,
     * representing the smallest page size, must be set. Additional set bits
     * represent supported block sizes. For example a host physical IOMMU that
     * uses page tables with a page size of 4kB, and supports 2MB and 4GB
     * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
     * block sizes is specified with mask 0xfffffffffffff000.
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
                                    uint64_t page_size_mask,
                                    Error **errp);
};

typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};
static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}
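
/*
 * Example (sketch, hypothetical helpers): a VFIO-like consumer initializes a
 * listener with its map/unmap hooks and then registers it against the
 * RamDiscardManager of the MemoryRegion it maps, see
 * ram_discard_manager_register_listener() below.
 *
 *     static int my_notify_populate(RamDiscardListener *rdl,
 *                                   MemoryRegionSection *section)
 *     {
 *         return my_map_section(section); // may fail, rejecting population
 *     }
 *
 *     static void my_notify_discard(RamDiscardListener *rdl,
 *                                   MemoryRegionSection *section)
 *     {
 *         my_unmap_section(section);
 *     }
 *
 *     RamDiscardListener rdl;
 *     ram_discard_listener_init(&rdl, my_notify_populate,
 *                               my_notify_discard, false);
 */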
typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);
/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory), to be used/accessed by the VM.
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped yet; it cannot change while the #MemoryRegion is
 * mapped.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};
uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque);
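
/*
 * Example (sketch, hypothetical callback): the new replay_discarded callback
 * lets a user act on all currently discarded parts of a section, e.g. a
 * migration helper clearing dirty bits for memory known to be discarded.
 * memory_region_get_ram_discard_manager() is declared later in this header
 * (outside this excerpt).
 *
 *     static void clear_discarded(MemoryRegionSection *section, void *opaque)
 *     {
 *         // e.g. drop dirty bitmap bits covering this discarded piece
 *     }
 *
 *     RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
 *     if (rdm) {
 *         ram_discard_manager_replay_discarded(rdm, section,
 *                                              clear_discarded, NULL);
 *     }
 */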
void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)
/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL.  Vice versa.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_sync_global)(MemoryListener *listener);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.  #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_start)(MemoryListener *listener);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /**
     * @priority:
     *
     * Govern the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener.  It can be used in contexts where we'd like to
     * identify one memory listener from the rest.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}
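
/*
 * Example (sketch): the returned FlatView is only guaranteed to remain valid
 * while the caller holds the BQL or stays inside an RCU critical section:
 *
 *     RCU_READ_LOCK_GUARD();
 *     FlatView *fv = address_space_to_flatview(as);
 *     // use fv; do not cache it beyond the critical section
 */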
/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
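
/*
 * Example (sketch, hypothetical callback): counting the RAM-backed ranges in
 * an address space's current FlatView. The cast drops the const qualifier
 * because memory_region_is_ram() takes a non-const pointer.
 *
 *     static bool count_ram_cb(Int128 start, Int128 len,
 *                              const MemoryRegion *mr,
 *                              hwaddr offset_in_region, void *opaque)
 *     {
 *         unsigned *count = opaque;
 *         if (memory_region_is_ram((MemoryRegion *)mr)) {
 *             (*count)++;
 *         }
 *         return false; // keep iterating
 *     }
 *
 *     unsigned count = 0;
 *     flatview_for_each_range(address_space_to_flatview(as),
 *                             count_ram_cb, &count);
 */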
static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}
/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
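
/*
 * Example (sketch): wiring the hypothetical my_dev_ops from the
 * MemoryRegionOps example above into a 4 KiB MMIO window during device
 * realize, for a sysbus device.
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &my_dev_ops, s,
 *                           "my-dev-mmio", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
 */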
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
 *                                         Accesses into the region will
 *                                         modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     Changing the size while migrating
 *                                     can result in the migration being
 *                                     canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE.
 * @path: the path in which to allocate the RAM.
 * @readonly: true to open @path for reading, false for read/write.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      bool readonly,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED.
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);
/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);
/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
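
/*
 * Example (sketch, illustrative names): exposing a 1 MiB window of an
 * existing RAM region a second time at address 0.
 *
 *     memory_region_init_alias(&alias_mr, NULL, "ram-low-alias", ram_mr,
 *                              0, 0x100000);
 *     memory_region_add_subregion(system_memory, 0x0, &alias_mr);
 */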
/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 * that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);
/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
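
/*
 * Example (sketch): a board model allocating and mapping 128 MiB of RAM;
 * MiB comes from "qemu/units.h" and error_fatal from "qapi/error.h".
 *
 *     memory_region_init_ram(ram, NULL, "board.ram", 128 * MiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */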
/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);
/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_protected: check whether a memory region is protected
 *
 * Returns %true if a memory region is protected RAM and cannot be accessed
 * via standard mechanisms, e.g. DMA.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_protected(MemoryRegion *mr);

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
        IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 * for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEvent event);
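
/*
 * Example (sketch): an IOMMU model reporting a newly created 4 KiB RW
 * mapping for IOMMU index 0.
 *
 *     IOMMUTLBEvent event = {
 *         .type = IOMMU_NOTIFIER_MAP,
 *         .entry = {
 *             .target_as = &address_space_memory,
 *             .iova = iova & ~0xfffULL,
 *             .translated_addr = pa & ~0xfffULL,
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_RW,
 *         },
 *     };
 *     memory_region_notify_iommu(iommu_mr, 0, event);
 */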
1686 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
1687 * entry to a single notifier
1689 * This works just like memory_region_notify_iommu(), but it only
1690 * notifies a specific notifier, not all of them.
1692 * @notifier: the notifier to be notified
1693 * @event: TLB event with the new entry in the IOMMU translation table.
1694 * The entry replaces all old entries for the same virtual I/O address
1695 * range.
1697 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
1698 IOMMUTLBEvent *event);
1701 * memory_region_register_iommu_notifier: register a notifier for changes to
1702 * IOMMU translation entries.
1704 * Returns 0 on success, or a negative errno otherwise. In particular,
1705 * -EINVAL indicates that at least one of the attributes of the notifier
1706 * is not supported (flag/range) by the IOMMU memory region. In case of
1707 * error, an error object is created and stored in @errp.
1709 * @mr: the memory region to observe
1710 * @n: the IOMMUNotifier to be added; the notify callback receives a
1711 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1712 * ceases to be valid on exit from the notifier.
1713 * @errp: pointer to Error*, to store an error if it happens.
1715 int memory_region_register_iommu_notifier(MemoryRegion *mr,
1716 IOMMUNotifier *n, Error **errp);
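/*
 * Example: registering a notifier for MAP and UNMAP events over the whole
 * address range. A sketch; my_iommu_notify() is a caller-provided callback,
 * and iommu_notifier_init() is the initializer declared earlier in this
 * header:
 *
 *     IOMMUNotifier n;
 *     Error *err = NULL;
 *
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX, 0);
 *     if (memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                               &n, &err)) {
 *         error_report_err(err);
 *     }
 */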
1719 * memory_region_iommu_replay: replay existing IOMMU translations to
1720 * a notifier with the minimum page granularity returned by
1721 * the IOMMU memory region's get_min_page_size() callback.
1723 * Note: this is not related to record-and-replay functionality.
1725 * @iommu_mr: the memory region to observe
1726 * @n: the notifier to which to replay iommu mappings
1728 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1731 * memory_region_unregister_iommu_notifier: unregister a notifier for
1732 * changes to IOMMU translation entries.
1734 * @mr: the memory region which was observed and for which notify_stopped()
1735 * needs to be called
1736 * @n: the notifier to be removed.
1738 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1739 IOMMUNotifier *n);
1742 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1743 * defined on the IOMMU.
1745 * Returns 0 on success, or a negative errno otherwise. In particular,
1746 * -EINVAL indicates that the IOMMU does not support the requested
1747 * attribute.
1749 * @iommu_mr: the memory region
1750 * @attr: the requested attribute
1751 * @data: a pointer to the requested attribute data
1753 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1754 enum IOMMUMemoryRegionAttr attr,
1755 void *data);
1758 * memory_region_iommu_attrs_to_index: return the IOMMU index to
1759 * use for translations with the given memory transaction attributes.
1761 * @iommu_mr: the memory region
1762 * @attrs: the memory transaction attributes
1764 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1765 MemTxAttrs attrs);
1768 * memory_region_iommu_num_indexes: return the total number of IOMMU
1769 * indexes that this IOMMU supports.
1771 * @iommu_mr: the memory region
1773 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1776 * memory_region_iommu_set_page_size_mask: set the supported page
1777 * sizes for a given IOMMU memory region
1779 * @iommu_mr: IOMMU memory region
1780 * @page_size_mask: supported page size mask
1781 * @errp: pointer to Error*, to store an error if it happens.
1783 int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1784 uint64_t page_size_mask,
1785 Error **errp);
1788 * memory_region_name: get a memory region's name
1790 * Returns the string that was used to initialize the memory region.
1792 * @mr: the memory region being queried
1794 const char *memory_region_name(const MemoryRegion *mr);
1797 * memory_region_is_logging: return whether a memory region is logging writes
1799 * Returns %true if the memory region is logging writes for the given client
1801 * @mr: the memory region being queried
1802 * @client: the client being queried
1804 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
1807 * memory_region_get_dirty_log_mask: return the clients for which a
1808 * memory region is logging writes.
1810 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1811 * are the bit indices.
1813 * @mr: the memory region being queried
1815 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
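/*
 * Example: testing one client bit in the mask. A minimal sketch using the
 * DIRTY_MEMORY_* constants mentioned above:
 *
 *     if (memory_region_get_dirty_log_mask(mr) & (1 << DIRTY_MEMORY_VGA)) {
 *         // display code must consult the dirty bitmap for this region
 *     }
 */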
1818 * memory_region_is_rom: check whether a memory region is ROM
1820 * Returns %true if a memory region is read-only memory.
1822 * @mr: the memory region being queried
1824 static inline bool memory_region_is_rom(MemoryRegion *mr)
1826 return mr->ram && mr->readonly;
1830 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
1832 * Returns %true if a memory region is non-volatile memory.
1834 * @mr: the memory region being queried
1836 static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
1838 return mr->nonvolatile;
1842 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
1844 * Returns a file descriptor backing a file-based RAM memory region,
1845 * or -1 if the region is not a file-based RAM memory region.
1847 * @mr: the RAM or alias memory region being queried.
1849 int memory_region_get_fd(MemoryRegion *mr);
1852 * memory_region_from_host: Convert a pointer into a RAM memory region
1853 * and an offset within it.
1855 * Given a host pointer inside a RAM memory region (created with
1856 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
1857 * the MemoryRegion and the offset within it.
1859 * Use with care; by the time this function returns, the returned pointer is
1860 * not protected by RCU anymore. If the caller is not within an RCU critical
1861 * section and does not hold the iothread lock, it must have other means of
1862 * protecting the pointer, such as a reference to the region that includes
1863 * the incoming ram_addr_t.
1865 * @ptr: the host pointer to be converted
1866 * @offset: the offset within memory region
1868 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
1871 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1873 * Returns a host pointer to a RAM memory region (created with
1874 * memory_region_init_ram() or memory_region_init_ram_ptr()).
1876 * Use with care; by the time this function returns, the returned pointer is
1877 * not protected by RCU anymore. If the caller is not within an RCU critical
1878 * section and does not hold the iothread lock, it must have other means of
1879 * protecting the pointer, such as a reference to the region that includes
1880 * the incoming ram_addr_t.
1882 * @mr: the memory region being queried.
1884 void *memory_region_get_ram_ptr(MemoryRegion *mr);
1886 /* memory_region_ram_resize: Resize a RAM region.
1888 * Resizing RAM while migrating can result in the migration being canceled.
1889 * Care has to be taken if the guest might have already detected the memory.
1891 * @mr: a memory region created with @memory_region_init_resizeable_ram.
1892 * @newsize: the new size of the region
1893 * @errp: pointer to Error*, to store an error if it happens.
1895 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
1896 Error **errp);
1899 * memory_region_msync: Synchronize selected address range of
1900 * a memory mapped region
1902 * @mr: the memory region to be synchronized
1903 * @addr: the initial address of the range to be synchronized
1904 * @size: the size of the range to be synchronized
1906 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
1909 * memory_region_writeback: Trigger cache writeback for
1910 * selected address range
1912 * @mr: the memory region to be updated
1913 * @addr: the initial address of the range to be written back
1914 * @size: the size of the range to be written back
1916 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
1919 * memory_region_set_log: Turn dirty logging on or off for a region.
1921 * Turns dirty logging on or off for a specified client (display, migration).
1922 * Only meaningful for RAM regions.
1924 * @mr: the memory region being updated.
1925 * @log: whether dirty logging is to be enabled or disabled.
1926 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
1928 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
1931 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
1933 * Marks a range of bytes as dirty, after it has been dirtied outside
1934 * guest code.
1936 * @mr: the memory region being dirtied.
1937 * @addr: the address (relative to the start of the region) being dirtied.
1938 * @size: size of the range being dirtied.
1940 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1941 hwaddr size);
1944 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
1946 * This function is called when the caller wants to clear the remote
1947 * dirty bitmap of a memory range within the memory region. This can
1948 * be used by e.g. KVM to manually clear the dirty log when
1949 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared supported by the host
1950 * kernel.
1952 * @mr: the memory region to clear the dirty log upon
1953 * @start: start address offset within the memory region
1954 * @len: length of the memory region to clear dirty bitmap
1956 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
1957 hwaddr len);
1960 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
1961 * bitmap and clear it.
1963 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
1964 * returns the snapshot. The snapshot can then be used to query dirty
1965 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
1966 * querying the same page multiple times, which is especially useful for
1967 * display updates where the scanlines often are not page aligned.
1969 * The dirty bitmap region which gets copied into the snapshot (and
1970 * cleared afterwards) can be larger than requested. The boundaries
1971 * are rounded up/down so complete bitmap longs (covering 64 pages on
1972 * 64-bit hosts) can be copied over into the bitmap snapshot. This
1973 * isn't a problem for display updates as the extra pages are outside
1974 * the visible area, and in case the visible area changes a full
1975 * display redraw is due anyway. Should other use cases for this
1976 * function emerge we might have to revisit this implementation
1977 * detail.
1979 * Use g_free to release DirtyBitmapSnapshot.
1981 * @mr: the memory region being queried.
1982 * @addr: the address (relative to the start of the region) being queried.
1983 * @size: the size of the range being queried.
1984 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
1986 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1987 hwaddr addr,
1988 hwaddr size,
1989 unsigned client);
1992 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
1993 * in the specified dirty bitmap snapshot.
1995 * @mr: the memory region being queried.
1996 * @snap: the dirty bitmap snapshot
1997 * @addr: the address (relative to the start of the region) being queried.
1998 * @size: the size of the range being queried.
2000 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
2001 DirtyBitmapSnapshot *snap,
2002 hwaddr addr, hwaddr size);
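/*
 * Example: the display-update pattern the snapshot API above is meant for.
 * A sketch; fb_mr, fb_size, height and stride describe a hypothetical
 * framebuffer:
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(fb_mr, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(fb_mr, snap,
 *                                              y * stride, stride)) {
 *             // redraw scanline y; querying per line is fine because the
 *             // snapshot can be consulted multiple times
 *         }
 *     }
 *     g_free(snap);
 */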
2005 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
2006 * client.
2008 * Marks a range of pages as no longer dirty.
2010 * @mr: the region being updated.
2011 * @addr: the start of the subrange being cleaned.
2012 * @size: the size of the subrange being cleaned.
2013 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
2014 * %DIRTY_MEMORY_VGA.
2016 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2017 hwaddr size, unsigned client);
2020 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
2021 * TBs (for self-modifying code).
2023 * The MemoryRegionOps->write() callback of a ROM device must use this function
2024 * to mark byte ranges that have been modified internally, such as by directly
2025 * accessing the memory returned by memory_region_get_ram_ptr().
2027 * This function marks the range dirty and invalidates TBs so that TCG can
2028 * detect self-modifying code.
2030 * @mr: the region being flushed.
2031 * @addr: the start, relative to the start of the region, of the range being
2032 * flushed.
2033 * @size: the size, in bytes, of the range being flushed.
2035 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
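/*
 * Example: a ROM device ->write() callback that programs its backing RAM
 * directly and then flushes, as required above. A sketch; MyFlashDev and
 * its single-byte programming model are hypothetical:
 *
 *     static void my_flash_write(void *opaque, hwaddr addr,
 *                                uint64_t data, unsigned size)
 *     {
 *         MyFlashDev *s = opaque;
 *         uint8_t *p = memory_region_get_ram_ptr(&s->mr);
 *
 *         stb_p(p + addr, data);                  // modify the contents
 *         memory_region_flush_rom_device(&s->mr, addr, size);
 *     }
 */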
2038 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
2040 * Allows a memory region to be marked as read-only (turning it into a ROM).
2041 * Only useful on RAM regions.
2043 * @mr: the region being updated.
2044 * @readonly: whether the region is to be ROM or RAM.
2046 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
2049 * memory_region_set_nonvolatile: Turn a memory region non-volatile
2051 * Allows a memory region to be marked as non-volatile.
2052 * Only useful on RAM regions.
2054 * @mr: the region being updated.
2055 * @nonvolatile: whether the region is to be non-volatile.
2057 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
2060 * memory_region_rom_device_set_romd: enable/disable ROMD mode
2062 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
2063 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
2064 * device is mapped to guest memory and satisfies read accesses directly.
2065 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
2066 * Writes are always handled by the #MemoryRegion.write function.
2068 * @mr: the memory region to be updated
2069 * @romd_mode: %true to put the region into ROMD mode
2071 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
2074 * memory_region_set_coalescing: Enable memory coalescing for the region.
2076 * Enables writes to a region to be queued for later processing. MMIO ->write
2077 * callbacks may be delayed until a non-coalesced MMIO is issued.
2078 * Only useful for IO regions. Roughly similar to write-combining hardware.
2080 * @mr: the memory region to be write coalesced
2082 void memory_region_set_coalescing(MemoryRegion *mr);
2085 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
2086 * a region.
2088 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
2089 * Multiple calls can be issued to coalesce disjoint ranges.
2091 * @mr: the memory region to be updated.
2092 * @offset: the start of the range within the region to be coalesced.
2093 * @size: the size of the subrange to be coalesced.
2095 void memory_region_add_coalescing(MemoryRegion *mr,
2096 hwaddr offset,
2097 uint64_t size);
2100 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
2102 * Disables any coalescing caused by memory_region_set_coalescing() or
2103 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
2104 * hardware.
2106 * @mr: the memory region to be updated.
2108 void memory_region_clear_coalescing(MemoryRegion *mr);
2111 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
2112 * accesses.
2114 * Ensures that pending coalesced MMIO requests are flushed before the memory
2115 * region is accessed. This property is automatically enabled for all regions
2116 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
2118 * @mr: the memory region to be updated.
2120 void memory_region_set_flush_coalesced(MemoryRegion *mr);
2123 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
2124 * accesses.
2126 * Clears the automatic coalesced MMIO flushing enabled via
2127 * memory_region_set_flush_coalesced(). Note that this service has no effect on
2128 * memory regions that have MMIO coalescing enabled for themselves. For them,
2129 * automatic flushing will stop once coalescing is disabled.
2131 * @mr: the memory region to be updated.
2133 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
2136 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
2137 * is written to a location.
2139 * Marks a word in an IO region (initialized with memory_region_init_io())
2140 * as a trigger for an eventfd event. The I/O callback will not be called.
2141 * The caller must be prepared to handle failure (that is, take the required
2142 * action if the callback _is_ called).
2144 * @mr: the memory region being updated.
2145 * @addr: the address within @mr that is to be monitored
2146 * @size: the size of the access to trigger the eventfd
2147 * @match_data: whether to match against @data, instead of just @addr
2148 * @data: the data to match against the guest write
2149 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2151 void memory_region_add_eventfd(MemoryRegion *mr,
2152 hwaddr addr,
2153 unsigned size,
2154 bool match_data,
2155 uint64_t data,
2156 EventNotifier *e);
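/*
 * Example: wiring a doorbell register to an eventfd, ioeventfd-style. A
 * sketch; the 0x1000 offset is made up, and event_notifier_init() comes
 * from "qemu/event_notifier.h":
 *
 *     EventNotifier notifier;
 *
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(mr, 0x1000, 2, true, 0, &notifier);
 *     // 2-byte guest writes of the value 0 to mr+0x1000 now signal the
 *     // notifier instead of invoking the region's I/O callbacks
 */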
2159 * memory_region_del_eventfd: Cancel an eventfd.
2161 * Cancels an eventfd trigger requested by a previous
2162 * memory_region_add_eventfd() call.
2164 * @mr: the memory region being updated.
2165 * @addr: the address within @mr that is to be monitored
2166 * @size: the size of the access to trigger the eventfd
2167 * @match_data: whether to match against @data, instead of just @addr
2168 * @data: the data to match against the guest write
2169 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2171 void memory_region_del_eventfd(MemoryRegion *mr,
2172 hwaddr addr,
2173 unsigned size,
2174 bool match_data,
2175 uint64_t data,
2176 EventNotifier *e);
2179 * memory_region_add_subregion: Add a subregion to a container.
2181 * Adds a subregion at @offset. The subregion may not overlap with other
2182 * subregions (except for those explicitly marked as overlapping). A region
2183 * may only be added once as a subregion (unless removed with
2184 * memory_region_del_subregion()); use memory_region_init_alias() if you
2185 * want a region to be a subregion in multiple locations.
2187 * @mr: the region to contain the new subregion; must be a container
2188 * initialized with memory_region_init().
2189 * @offset: the offset relative to @mr where @subregion is added.
2190 * @subregion: the subregion to be added.
2192 void memory_region_add_subregion(MemoryRegion *mr,
2193 hwaddr offset,
2194 MemoryRegion *subregion);
2196 * memory_region_add_subregion_overlap: Add a subregion to a container
2197 * with overlap.
2199 * Adds a subregion at @offset. The subregion may overlap with other
2200 * subregions. Conflicts are resolved by having a higher @priority hide a
2201 * lower @priority. Subregions without priority are taken as @priority 0.
2202 * A region may only be added once as a subregion (unless removed with
2203 * memory_region_del_subregion()); use memory_region_init_alias() if you
2204 * want a region to be a subregion in multiple locations.
2206 * @mr: the region to contain the new subregion; must be a container
2207 * initialized with memory_region_init().
2208 * @offset: the offset relative to @mr where @subregion is added.
2209 * @subregion: the subregion to be added.
2210 * @priority: used for resolving overlaps; highest priority wins.
2212 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2213 hwaddr offset,
2214 MemoryRegion *subregion,
2215 int priority);
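/*
 * Example: a container where a small MMIO window shadows part of the RAM
 * beneath it, resolved by priority. A sketch with hypothetical regions:
 *
 *     memory_region_add_subregion(container, 0, ram_mr);    // priority 0
 *     memory_region_add_subregion_overlap(container, 0xe0000,
 *                                         mmio_mr, 1);      // hides the RAM
 *                                                           // underneath
 */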
2218 * memory_region_get_ram_addr: Get the ram address associated with a memory
2219 * region
2221 * @mr: the region to be queried
2223 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
2225 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
2227 * memory_region_del_subregion: Remove a subregion.
2229 * Removes a subregion from its container.
2231 * @mr: the container to be updated.
2232 * @subregion: the region being removed; must be a current subregion of @mr.
2234 void memory_region_del_subregion(MemoryRegion *mr,
2235 MemoryRegion *subregion);
2238 * memory_region_set_enabled: dynamically enable or disable a region
2240 * Enables or disables a memory region. A disabled memory region
2241 * ignores all accesses to itself and its subregions. It does not
2242 * obscure sibling subregions with lower priority - it simply behaves as
2243 * if it was removed from the hierarchy.
2245 * Regions default to being enabled.
2247 * @mr: the region to be updated
2248 * @enabled: whether to enable or disable the region
2250 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
2253 * memory_region_set_address: dynamically update the address of a region
2255 * Dynamically updates the address of a region, relative to its container.
2256 * May be used on regions that are currently part of a memory hierarchy.
2258 * @mr: the region to be updated
2259 * @addr: new address, relative to container region
2261 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
2264 * memory_region_set_size: dynamically update the size of a region.
2266 * Dynamically updates the size of a region.
2268 * @mr: the region to be updated
2269 * @size: the new size of the region.
2271 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
2274 * memory_region_set_alias_offset: dynamically update a memory alias's offset
2276 * Dynamically updates the offset into the target region that an alias points
2277 * to, as if the fourth argument to memory_region_init_alias() has changed.
2279 * @mr: the #MemoryRegion to be updated; should be an alias.
2280 * @offset: the new offset into the target memory region
2282 void memory_region_set_alias_offset(MemoryRegion *mr,
2283 hwaddr offset);
2286 * memory_region_present: checks if an address relative to a @container
2287 * translates into a #MemoryRegion within @container
2289 * Answers whether a #MemoryRegion within @container covers the address
2290 * @addr.
2292 * @container: a #MemoryRegion within which @addr is a relative address
2293 * @addr: the area within @container to be searched
2295 bool memory_region_present(MemoryRegion *container, hwaddr addr);
2298 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
2299 * into any address space.
2301 * @mr: a #MemoryRegion which should be checked if it's mapped
2303 bool memory_region_is_mapped(MemoryRegion *mr);
2306 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
2307 * #MemoryRegion
2309 * The #RamDiscardManager cannot change while a memory region is mapped.
2311 * @mr: the #MemoryRegion
2313 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
2316 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
2317 * #RamDiscardManager assigned
2319 * @mr: the #MemoryRegion
2321 static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
2323 return !!memory_region_get_ram_discard_manager(mr);
2327 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
2328 * #MemoryRegion
2330 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
2331 * that does not cover RAM, or a #MemoryRegion that already has a
2332 * #RamDiscardManager assigned.
2334 * @mr: the #MemoryRegion
2335 * @rdm: #RamDiscardManager to set
2337 void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2338 RamDiscardManager *rdm);
2341 * memory_region_find: translate an address/size relative to a
2342 * MemoryRegion into a #MemoryRegionSection.
2344 * Locates the first #MemoryRegion within @mr that overlaps the range
2345 * given by @addr and @size.
2347 * Returns a #MemoryRegionSection that describes a contiguous overlap.
2348 * It will have the following characteristics:
2349 * - @size = 0 iff no overlap was found
2350 * - @mr is non-%NULL iff an overlap was found
2352 * Remember that in the return value the @offset_within_region is
2353 * relative to the returned region (in the .@mr field), not to the
2354 * @mr argument.
2356 * Similarly, the .@offset_within_address_space is relative to the
2357 * address space that contains both regions, the passed and the
2358 * returned one. However, in the special case where the @mr argument
2359 * has no container (and thus is the root of the address space), the
2360 * following will hold:
2361 * - @offset_within_address_space >= @addr
2362 * - @offset_within_address_space + .@size <= @addr + @size
2364 * @mr: a MemoryRegion within which @addr is a relative address
2365 * @addr: start of the area within @mr to be searched
2366 * @size: size of the area to be searched
2368 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2369 hwaddr addr, uint64_t size);
2372 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2374 * Synchronizes the dirty page log for all address spaces.
2376 void memory_global_dirty_log_sync(void);
2379 * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
2381 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
2382 * This function must be called after the dirty log bitmap is cleared, and
2383 * before dirty guest memory pages are read. If you are using
2384 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
2385 * care of doing this.
2387 void memory_global_after_dirty_log_sync(void);
2390 * memory_region_transaction_begin: Start a transaction.
2392 * During a transaction, changes will be accumulated and made visible
2393 * only when the transaction ends (is committed).
2395 void memory_region_transaction_begin(void);
2398 * memory_region_transaction_commit: Commit a transaction and make changes
2399 * visible to the guest.
2401 void memory_region_transaction_commit(void);
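/*
 * Example: batching two updates so the guest observes them atomically and
 * the memory topology is rebuilt only once. A sketch; bar_mr and new_base
 * are assumed to exist:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_address(bar_mr, new_base);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();    // changes become visible here
 */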
2404 * memory_listener_register: register callbacks to be called when memory
2405 * sections are mapped into or unmapped from an
2406 * address space
2408 * @listener: an object containing the callbacks to be called
2409 * @filter: if non-%NULL, only regions in this address space will be observed
2411 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
2414 * memory_listener_unregister: undo the effect of memory_listener_register()
2416 * @listener: an object containing the callbacks to be removed
2418 void memory_listener_unregister(MemoryListener *listener);
2421 * memory_global_dirty_log_start: begin dirty logging for all regions
2423 * @flags: purpose of starting dirty log, migration or dirty rate
2425 void memory_global_dirty_log_start(unsigned int flags);
2428 * memory_global_dirty_log_stop: end dirty logging for all regions
2430 * @flags: purpose of stopping dirty log, migration or dirty rate
2432 void memory_global_dirty_log_stop(unsigned int flags);
2434 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
2437 * memory_region_dispatch_read: perform a read directly to the specified
2438 * MemoryRegion.
2440 * @mr: #MemoryRegion to access
2441 * @addr: address within that region
2442 * @pval: pointer to uint64_t which the data is written to
2443 * @op: size, sign, and endianness of the memory operation
2444 * @attrs: memory transaction attributes to use for the access
2446 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
2447 hwaddr addr,
2448 uint64_t *pval,
2449 MemOp op,
2450 MemTxAttrs attrs);
2452 * memory_region_dispatch_write: perform a write directly to the specified
2453 * MemoryRegion.
2455 * @mr: #MemoryRegion to access
2456 * @addr: address within that region
2457 * @data: data to write
2458 * @op: size, sign, and endianness of the memory operation
2459 * @attrs: memory transaction attributes to use for the access
2461 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
2462 hwaddr addr,
2463 uint64_t data,
2464 MemOp op,
2465 MemTxAttrs attrs);
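/*
 * Example: a direct 4-byte little-endian read from a region, bypassing
 * address space translation. A sketch; assumes the caller already holds a
 * valid reference to @mr:
 *
 *     uint64_t val;
 *     MemTxResult r;
 *
 *     r = memory_region_dispatch_read(mr, 0x10, &val, MO_32 | MO_LE,
 *                                     MEMTXATTRS_UNSPECIFIED);
 *     if (r != MEMTX_OK) {
 *         // the access was rejected or hit unassigned memory
 *     }
 */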
2468 * address_space_init: initializes an address space
2470 * @as: an uninitialized #AddressSpace
2471 * @root: a #MemoryRegion that routes addresses for the address space
2472 * @name: an address space name. The name is only used for debugging
2473 * output.
2475 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
2478 * address_space_destroy: destroy an address space
2480 * Releases all resources associated with an address space. After an address space
2481 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
2482 * as well.
2484 * @as: address space to be destroyed
2486 void address_space_destroy(AddressSpace *as);
2489 * address_space_remove_listeners: unregister all listeners of an address space
2491 * Removes all callbacks previously registered with memory_listener_register()
2492 * for @as.
2494 * @as: an initialized #AddressSpace
2496 void address_space_remove_listeners(AddressSpace *as);
2499 * address_space_rw: read from or write to an address space.
2501 * Return a MemTxResult indicating whether the operation succeeded
2502 * or failed (eg unassigned memory, device rejected the transaction,
2503 * IOMMU fault).
2505 * @as: #AddressSpace to be accessed
2506 * @addr: address within that address space
2507 * @attrs: memory transaction attributes
2508 * @buf: buffer with the data transferred
2509 * @len: the number of bytes to read or write
2510 * @is_write: indicates the transfer direction
2512 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
2513 MemTxAttrs attrs, void *buf,
2514 hwaddr len, bool is_write);
2517 * address_space_write: write to address space.
2519 * Return a MemTxResult indicating whether the operation succeeded
2520 * or failed (eg unassigned memory, device rejected the transaction,
2521 * IOMMU fault).
2523 * @as: #AddressSpace to be accessed
2524 * @addr: address within that address space
2525 * @attrs: memory transaction attributes
2526 * @buf: buffer with the data transferred
2527 * @len: the number of bytes to write
2529 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2530 MemTxAttrs attrs,
2531 const void *buf, hwaddr len);
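/*
 * Example: a device model performing a DMA write into guest memory and
 * reporting failure. A sketch; gpa and buf are made up, and
 * address_space_memory is the global system memory address space:
 *
 *     uint8_t buf[512];
 *
 *     // fill buf with the data to transfer, then:
 *     if (address_space_write(&address_space_memory, gpa,
 *                             MEMTXATTRS_UNSPECIFIED, buf,
 *                             sizeof(buf)) != MEMTX_OK) {
 *         // raise a DMA error in the device's status registers
 *     }
 */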
2534 * address_space_write_rom: write to address space, including ROM.
2536 * This function writes to the specified address space, but will
2537 * write data to both ROM and RAM. This is used for non-guest
2538 * writes like writes from the gdb debug stub or initial loading
2539 * of ROM contents.
2541 * Note that portions of the write which attempt to write data to
2542 * a device will be silently ignored -- only real RAM and ROM will
2543 * be written to.
2545 * Return a MemTxResult indicating whether the operation succeeded
2546 * or failed (eg unassigned memory, device rejected the transaction,
2547 * IOMMU fault).
2549 * @as: #AddressSpace to be accessed
2550 * @addr: address within that address space
2551 * @attrs: memory transaction attributes
2552 * @buf: buffer with the data transferred
2553 * @len: the number of bytes to write
2555 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2556 MemTxAttrs attrs,
2557 const void *buf, hwaddr len);
2559 /* address_space_ld*: load from an address space
2560 * address_space_st*: store to an address space
2562 * These functions perform a load or store of the byte, word,
2563 * longword or quad to the specified address within the AddressSpace.
2564 * The _le suffixed functions treat the data as little endian;
2565 * _be indicates big endian; no suffix indicates "same endianness
2566 * as guest CPU".
2568 * The "guest CPU endianness" accessors are deprecated for use outside
2569 * target-* code; devices should be CPU-agnostic and use either the LE
2570 * or the BE accessors.
2572 * @as: #AddressSpace to be accessed
2573 * @addr: address within that address space
2574 * @val: data value, for stores
2575 * @attrs: memory transaction attributes
2576 * @result: location to write the success/failure of the transaction;
2577 * if NULL, this information is discarded
2580 #define SUFFIX
2581 #define ARG1 as
2582 #define ARG1_DECL AddressSpace *as
2583 #include "exec/memory_ldst.h.inc"
2585 #define SUFFIX
2586 #define ARG1 as
2587 #define ARG1_DECL AddressSpace *as
2588 #include "exec/memory_ldst_phys.h.inc"
2590 struct MemoryRegionCache {
2591 void *ptr;
2592 hwaddr xlat;
2593 hwaddr len;
2594 FlatView *fv;
2595 MemoryRegionSection mrs;
2596 bool is_write;
2599 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
2602 /* address_space_ld*_cached: load from a cached #MemoryRegion
2603 * address_space_st*_cached: store into a cached #MemoryRegion
2605 * These functions perform a load or store of the byte, word,
2606 * longword or quad to the specified address. The address is
2607 * a physical address in the AddressSpace, but it must lie within
2608 * a #MemoryRegion that was mapped with address_space_cache_init.
2610 * The _le suffixed functions treat the data as little endian;
2611 * _be indicates big endian; no suffix indicates "same endianness
2612 * as guest CPU".
2614 * The "guest CPU endianness" accessors are deprecated for use outside
2615 * target-* code; devices should be CPU-agnostic and use either the LE
2616 * or the BE accessors.
2618 * @cache: previously initialized #MemoryRegionCache to be accessed
2619 * @addr: address within the address space
2620 * @val: data value, for stores
2621 * @attrs: memory transaction attributes
2622 * @result: location to write the success/failure of the transaction;
2623 * if NULL, this information is discarded
2626 #define SUFFIX _cached_slow
2627 #define ARG1 cache
2628 #define ARG1_DECL MemoryRegionCache *cache
2629 #include "exec/memory_ldst.h.inc"
2631 /* Inline fast path for direct RAM access. */
2632 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
2633 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
2635 assert(addr < cache->len);
2636 if (likely(cache->ptr)) {
2637 return ldub_p(cache->ptr + addr);
2638 } else {
2639 return address_space_ldub_cached_slow(cache, addr, attrs, result);
2643 static inline void address_space_stb_cached(MemoryRegionCache *cache,
2644 hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
2646 assert(addr < cache->len);
2647 if (likely(cache->ptr)) {
2648 stb_p(cache->ptr + addr, val);
2649 } else {
2650 address_space_stb_cached_slow(cache, addr, val, attrs, result);
2654 #define ENDIANNESS _le
2655 #include "exec/memory_ldst_cached.h.inc"
2657 #define ENDIANNESS _be
2658 #include "exec/memory_ldst_cached.h.inc"
2660 #define SUFFIX _cached
2661 #define ARG1 cache
2662 #define ARG1_DECL MemoryRegionCache *cache
2663 #include "exec/memory_ldst_phys.h.inc"
2665 /* address_space_cache_init: prepare for repeated access to a physical
2666 * memory region
2668 * @cache: #MemoryRegionCache to be filled
2669 * @as: #AddressSpace to be accessed
2670 * @addr: address within that address space
2671 * @len: length of buffer
2672 * @is_write: indicates the transfer direction
2674 * Will only work with RAM, and may map a subset of the requested range by
2675 * returning a value that is less than @len. On failure, a negative errno
2676 * value is returned.
2678 * Because it only works with RAM, this function can be used for
2679 * read-modify-write operations. In this case, is_write should be %true.
2681 * Note that addresses passed to the address_space_*_cached functions
2682 * are relative to @addr.
2684 int64_t address_space_cache_init(MemoryRegionCache *cache,
2685 AddressSpace *as,
2686 hwaddr addr,
2687 hwaddr len,
2688 bool is_write);
2691 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2693 * @cache: The #MemoryRegionCache to operate on.
2694 * @addr: The first physical address that was written, relative to the
2695 * address that was passed to @address_space_cache_init.
2696 * @access_len: The number of bytes that were written starting at @addr.
2698 void address_space_cache_invalidate(MemoryRegionCache *cache,
2699 hwaddr addr,
2700 hwaddr access_len);
2703 * address_space_cache_destroy: free a #MemoryRegionCache
2705 * @cache: The #MemoryRegionCache whose memory should be released.
2707 void address_space_cache_destroy(MemoryRegionCache *cache);
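/*
 * Example: the intended cache lifecycle for a read-modify-write of a small
 * descriptor, virtio-style. A sketch; desc_gpa is made up, and the
 * *_cached accessors used here are declared further down in this header:
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     uint8_t desc[16];
 *
 *     if (address_space_cache_init(&cache, &address_space_memory,
 *                                  desc_gpa, sizeof(desc), true) < 0) {
 *         return;    // not RAM, or the range could not be mapped
 *     }
 *     // (a robust caller also checks the returned length against sizeof(desc))
 *     address_space_read_cached(&cache, 0, desc, sizeof(desc));
 *     desc[0] |= 1;  // modify
 *     address_space_write_cached(&cache, 0, desc, sizeof(desc));
 *     address_space_cache_invalidate(&cache, 0, sizeof(desc));
 *     address_space_cache_destroy(&cache);
 */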
2709 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2710 * entry. Should be called from an RCU critical section.
2712 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2713 bool is_write, MemTxAttrs attrs);
2715 /* address_space_translate: translate an address range in an address space
2716 * into a MemoryRegion and a range within that region. Should be
2717 * called from an RCU critical section, to avoid the last reference
2718 * to the returned region disappearing after address_space_translate returns.
2720 * @fv: #FlatView to be accessed
2721 * @addr: address within that address space
2722 * @xlat: pointer to address within the returned memory region section's
2723 * #MemoryRegion.
2724 * @len: pointer to length
2725 * @is_write: indicates the transfer direction
2726 * @attrs: memory attributes
2728 MemoryRegion *flatview_translate(FlatView *fv,
2729 hwaddr addr, hwaddr *xlat,
2730 hwaddr *len, bool is_write,
2731 MemTxAttrs attrs);
2733 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2734 hwaddr addr, hwaddr *xlat,
2735 hwaddr *len, bool is_write,
2736 MemTxAttrs attrs)
2738 return flatview_translate(address_space_to_flatview(as),
2739 addr, xlat, len, is_write, attrs);
2742 /* address_space_access_valid: check for validity of accessing an address
2743 * space range
2745 * Check whether memory is assigned to the given address space range, and
2746 * access is permitted by any IOMMU regions that are active for the address
2747 * space.
2749 * For now, addr and len should be aligned to a page size. This limitation
2750 * will be lifted in the future.
2752 * @as: #AddressSpace to be accessed
2753 * @addr: address within that address space
2754 * @len: length of the area to be checked
2755 * @is_write: indicates the transfer direction
2756 * @attrs: memory attributes
2758 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2759 bool is_write, MemTxAttrs attrs);
2761 /* address_space_map: map a physical memory region into a host virtual address
2763 * May map a subset of the requested range, given by and returned in @plen.
2764 * May return %NULL and set *@plen to zero(0), if resources needed to perform
2765 * the mapping are exhausted.
2766 * Use only for reads OR writes - not for read-modify-write operations.
2767 * Use cpu_register_map_client() to know when retrying the map operation is
2768 * likely to succeed.
2770 * @as: #AddressSpace to be accessed
2771 * @addr: address within that address space
2772 * @plen: pointer to length of buffer; updated on return
2773 * @is_write: indicates the transfer direction
2774 * @attrs: memory attributes
2776 void *address_space_map(AddressSpace *as, hwaddr addr,
2777 hwaddr *plen, bool is_write, MemTxAttrs attrs);
2779 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2781 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
2782 * the amount of memory that was actually read or written by the caller.
2784 * @as: #AddressSpace used
2785 * @buffer: host pointer as returned by address_space_map()
2786 * @len: buffer length as returned by address_space_map()
2787 * @access_len: amount of data actually transferred
2788 * @is_write: indicates the transfer direction
2790 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2791 bool is_write, hwaddr access_len);
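/*
 * Example: the map/unmap pairing described above, with a bounce-buffer
 * fallback when a direct mapping is unavailable. A sketch; a robust caller
 * loops, since @plen may come back smaller than requested:
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, addr, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         memcpy(p, data, plen);
 *         address_space_unmap(as, p, plen, true, plen);
 *     } else {
 *         address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
 *                          data, len, true);
 *     }
 */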
2794 /* Internal functions, part of the implementation of address_space_read. */
2795 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2796 MemTxAttrs attrs, void *buf, hwaddr len);
2797 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2798 MemTxAttrs attrs, void *buf,
2799 hwaddr len, hwaddr addr1, hwaddr l,
2800 MemoryRegion *mr);
2801 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2803 /* Internal functions, part of the implementation of address_space_read_cached
2804 * and address_space_write_cached. */
2805 MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
2806 hwaddr addr, void *buf, hwaddr len);
2807 MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
2808 hwaddr addr, const void *buf,
2809 hwaddr len);
2811 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2813 if (is_write) {
2814 return memory_region_is_ram(mr) && !mr->readonly &&
2815 !mr->rom_device && !memory_region_is_ram_device(mr);
2816 } else {
2817 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2818 memory_region_is_romd(mr);
2823 * address_space_read: read from an address space.
2825 * Return a MemTxResult indicating whether the operation succeeded
2826 * or failed (eg unassigned memory, device rejected the transaction,
2827 * IOMMU fault). Called within RCU critical section.
2829 * @as: #AddressSpace to be accessed
2830 * @addr: address within that address space
2831 * @attrs: memory transaction attributes
2832 * @buf: buffer with the data transferred
2833 * @len: length of the data transferred
2835 static inline __attribute__((__always_inline__))
2836 MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2837 MemTxAttrs attrs, void *buf,
2838 hwaddr len)
2840 MemTxResult result = MEMTX_OK;
2841 hwaddr l, addr1;
2842 void *ptr;
2843 MemoryRegion *mr;
2844 FlatView *fv;
2846 if (__builtin_constant_p(len)) {
2847 if (len) {
2848 RCU_READ_LOCK_GUARD();
2849 fv = address_space_to_flatview(as);
2850 l = len;
2851 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2852 if (len == l && memory_access_is_direct(mr, false)) {
2853 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2854 memcpy(buf, ptr, len);
2855 } else {
2856 result = flatview_read_continue(fv, addr, attrs, buf, len,
2857 addr1, l, mr);
2860 } else {
2861 result = address_space_read_full(as, addr, attrs, buf, len);
2863 return result;
2867 * address_space_read_cached: read from a cached RAM region
2869 * @cache: Cached region to be addressed
2870 * @addr: address relative to the base of the RAM region
2871 * @buf: buffer with the data transferred
2872 * @len: length of the data transferred
2874 static inline MemTxResult
2875 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
2876 void *buf, hwaddr len)
2878 assert(addr < cache->len && len <= cache->len - addr);
2879 fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
2880 if (likely(cache->ptr)) {
2881 memcpy(buf, cache->ptr + addr, len);
2882 return MEMTX_OK;
2883 } else {
2884 return address_space_read_cached_slow(cache, addr, buf, len);
2889 * address_space_write_cached: write to a cached RAM region
2891 * @cache: Cached region to be addressed
2892 * @addr: address relative to the base of the RAM region
2893 * @buf: buffer with the data transferred
2894 * @len: length of the data transferred
2896 static inline MemTxResult
2897 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
2898 const void *buf, hwaddr len)
2900 assert(addr < cache->len && len <= cache->len - addr);
2901 if (likely(cache->ptr)) {
2902 memcpy(cache->ptr + addr, buf, len);
2903 return MEMTX_OK;
2904 } else {
2905 return address_space_write_cached_slow(cache, addr, buf, len);
2909 #ifdef NEED_CPU_H
2910 /* enum device_endian to MemOp. */
2911 static inline MemOp devend_memop(enum device_endian end)
2913 QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
2914 DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
2916 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
2917 /* Swap if non-host endianness or native (target) endianness */
2918 return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
2919 #else
2920 const int non_host_endianness =
2921 DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
2923 /* In this case, native (target) endianness needs no swap. */
2924 return (end == non_host_endianness) ? MO_BSWAP : 0;
2925 #endif
2927 #endif
2930 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
2931 * to manage the actual amount of memory consumed by the VM (then, the memory
2932 * provided by RAM blocks might be bigger than the desired memory consumption).
2933 * This *must* be set if:
2934 * - Discarding parts of a RAM block does not result in the change being
2935 * reflected in the VM and the pages getting freed.
2936 * - All memory in RAM blocks is pinned or duplicated, blindly invalidating
2937 * any previous discards.
2938 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
2939 * encrypted VMs).
2940 * Technologies that only temporarily pin the current working set of a
2941 * driver are fine, because we don't expect such pages to be discarded
2942 * (esp. based on guest action like balloon inflation).
2944 * This is *not* to be used to protect from concurrent discards (esp.,
2945 * postcopy).
2947 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
2948 * discards to work reliably is active.
2950 int ram_block_discard_disable(bool state);
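/*
 * Example: how a page-pinning user would take and drop the inhibitor. A
 * sketch; error handling is abbreviated:
 *
 *     if (ram_block_discard_disable(true)) {
 *         error_report("discards are in use (e.g. virtio-balloon), "
 *                      "cannot pin all guest memory");
 *         return -EBUSY;
 *     }
 *     // ... pin guest memory and run ...
 *     ram_block_discard_disable(false);
 */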
2953 * See ram_block_discard_disable(): only disable uncoordinated discards,
2954 * keeping coordinated discards (via the RamDiscardManager) enabled.
2956 int ram_block_uncoordinated_discard_disable(bool state);
2959 * Inhibit technologies that disable discarding of pages in RAM blocks.
2961 * Returns 0 if successful. Returns -EBUSY if discarding of pages in RAM
2962 * blocks has already been disabled.
2964 int ram_block_discard_require(bool state);
2967 * See ram_block_discard_require(): only inhibit technologies that disable
2968 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
2969 * technologies that only inhibit uncoordinated discards (via the
2970 * RamDiscardManager).
2972 int ram_block_coordinated_discard_require(bool state);
2975 * Test if any discarding of memory in ram blocks is disabled.
2977 bool ram_block_discard_is_disabled(void);
2980 * Test if any discarding of memory in ram blocks is required to work reliably.
2982 bool ram_block_discard_is_required(void);
2984 #endif
2986 #endif