/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"

#include "hw/qdev-core.h"
#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                           TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)
typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;
/* See address_space_translate: bit 0 is read, bit 1 is write.  */
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or more IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);
struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
}
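
/*
 * Example (illustrative sketch, not part of the original header): setting up
 * and registering an IOMMU notifier that logs invalidations.  The callback
 * name and the iommu_mr variable are hypothetical.
 *
 *   static void my_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *   {
 *       qemu_log("IOMMU unmap: addr_mask 0x%" PRIx64 "\n", entry->addr_mask);
 *   }
 *
 *   IOMMUNotifier n;
 *
 *   iommu_notifier_init(&n, my_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                       0, HWADDR_MAX);
 *   memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
 */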
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);
    /* Instruction execution pre-callback:
     * @addr is the address of the access relative to the @mr.
     * @size is the size of the area returned by the callback.
     * @offset is the location of the pointer inside @mr.
     *
     * Returns a pointer to a location which contains guest code.
     */
    void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
                         unsigned *offset);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specifies bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration.
     */
    const MemoryRegionMmio old_mmio;
};
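
/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * MemoryRegionOps for a hypothetical device with 4-byte registers.  The
 * mydev_* and MyDevState names are invented for illustration.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return s->regs[addr >> 2];
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                           unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,
 *       .valid.max_access_size = 4,
 *   };
 */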
typedef struct IOMMUMemoryRegionClass {
    struct DeviceClass parent_class;

    /*
     * Return a TLB entry that contains a given address. @flag should
     * be the access permission of this translation operation. We can
     * set @flag to IOMMU_NONE to mean that we don't need any
     * read/write permission checks, for example during region replay.
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag);
    /* Returns minimum supported page size */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flags change */
    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Set this up to provide a customized IOMMU replay function */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
} IOMMUMemoryRegionClass;
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool readonly; /* For RAM regions */
    bool flush_coalesced_mmio;
    uint8_t dirty_log_mask;
    const MemoryRegionOps *ops;
    MemoryRegion *container;
    void (*destructor)(MemoryRegion *mr);
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};
struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)
/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};
/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);
/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
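
/*
 * Example (illustrative sketch, not part of the original header): creating an
 * MMIO region from a device realize function and mapping it into a container.
 * The mydev_ops and MyDevState names follow the hypothetical device above;
 * system_memory is a placeholder for the container region.
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-regs", 0x1000);
 *   memory_region_add_subregion(system_memory, 0xfeed0000, &s->iomem);
 */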
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);
/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @share: %true if memory must be mmapped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp);
/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmapped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);
/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);
/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
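
/*
 * Example (illustrative sketch, not part of the original header): exposing
 * the low 1MB of a RAM region at a second guest-physical address via an
 * alias.  The ram, isa_alias, dev and system_memory names are placeholders.
 *
 *   memory_region_init_alias(&isa_alias, OBJECT(dev), "isa-lowmem",
 *                            ram, 0, 0x100000);
 *   memory_region_add_subregion_overlap(system_memory, 0xe0000000,
 *                                       &isa_alias, 1);
 */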
/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);
/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);
/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 * This function is deprecated.  Use memory_region_init_io() with NULL
 * callbacks instead.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
static inline void memory_region_init_reservation(MemoryRegion *mr,
                                                  Object *owner,
                                                  const char *name,
                                                  uint64_t size)
{
    memory_region_init_io(mr, owner, NULL, mr, name, size);
}
/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the QOM class name of the region
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);
/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
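
/*
 * Example (illustrative sketch, not part of the original header): allocating
 * and mapping migratable board RAM.  The machine_ram and system_memory names
 * are placeholders.
 *
 *   memory_region_init_ram(&machine_ram, NULL, "board.ram",
 *                          machine->ram_size, &error_fatal);
 *   memory_region_add_subregion(system_memory, 0, &machine_ram);
 */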
/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().  This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);
/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);
/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}
/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);
/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}
/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}
/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL.  This is a fast path avoiding QOM checking, use with caution.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                IOMMUTLBEntry entry);
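
/*
 * Example (illustrative sketch, not part of the original header): telling
 * registered notifiers that a 4KiB translation was removed.  The iommu_mr
 * variable is assumed to be a valid IOMMUMemoryRegion pointer.
 *
 *   IOMMUTLBEntry entry = {
 *       .target_as = &address_space_memory,
 *       .translated_addr = 0,
 *       .addr_mask = 0xfff,
 *       .perm = IOMMU_NONE,
 *   };
 *
 *   memory_region_notify_iommu(iommu_mr, entry);
 */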
/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);
/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);
/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * mr->iommu_ops->get_page_size().
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * @iommu_mr: the memory region to observe
 */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);
/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}
/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);
/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);
/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);
/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);
/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);
/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client, and clear it.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);
/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Unlike
 * memory_region_test_and_clear_dirty this allows querying the same
 * page multiple times, which is especially useful for display updates
 * where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64bit hosts) can be copied over into the bitmap snapshot.  That
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);
/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);
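
/*
 * Example (illustrative sketch, not part of the original header): a display
 * update loop driven by a dirty bitmap snapshot.  The vram region, vram_size,
 * page_size and update_scanline() helper are hypothetical.
 *
 *   DirtyBitmapSnapshot *snap;
 *   hwaddr ofs;
 *
 *   snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                 DIRTY_MEMORY_VGA);
 *   for (ofs = 0; ofs < vram_size; ofs += page_size) {
 *       if (memory_region_snapshot_get_dirty(vram, snap, ofs, page_size)) {
 *           update_scanline(ofs);
 *       }
 *   }
 *   g_free(snap);
 */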
/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);
/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);
/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);
/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);
/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);
/**
 * memory_region_set_global_locking: Declares that access processing requires
 *                                   QEMU's global lock.
 *
 * When this is invoked, accesses to the memory region will be processed while
 * holding the global lock of QEMU.  This is the default behavior of memory
 * regions.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_global_locking(MemoryRegion *mr);
/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request).  In this case, the device model implementing the access
 * handlers is responsible for synchronization of concurrency.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);
/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
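
/*
 * Example (illustrative sketch, not part of the original header): wiring a
 * doorbell register to an eventfd so that guest writes of the value 1 wake a
 * worker without taking the MMIO slow path.  The s->doorbell notifier and
 * DOORBELL_OFFSET constant are hypothetical.
 *
 *   event_notifier_init(&s->doorbell, 0);
 *   memory_region_add_eventfd(&s->iomem, DOORBELL_OFFSET, 4,
 *                             true, 1, &s->doorbell);
 */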
/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);
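
/*
 * Example (illustrative sketch, not part of the original header): layering a
 * small MMIO window on top of RAM; the higher priority makes the window
 * visible wherever the two regions overlap.  The names are placeholders.
 *
 *   memory_region_add_subregion_overlap(system_memory, 0, &ram, 0);
 *   memory_region_add_subregion_overlap(system_memory, 0xfffc0000,
 *                                       &mmio_window, 1);
 */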
/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);
/**
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
/**
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
/**
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);
/**
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);
/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);
/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);
/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @as to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);
/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
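
/*
 * Example (illustrative sketch, not part of the original header): batching
 * several layout changes so listeners see a single atomic update.  The bar
 * region and new_base value are hypothetical.
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(&bar, true);
 *   memory_region_set_address(&bar, new_base);
 *   memory_region_transaction_commit();
 */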
/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space.
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
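
/*
 * Example (illustrative sketch, not part of the original header): a listener
 * that logs sections as they are added to the system address space.  The
 * callback and listener names are invented.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       qemu_log("add %s\n", memory_region_name(section->mr));
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 */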
/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);
void mtree_info(fprintf_function mon_printf, void *f, bool flatview);
/**
 * memory_region_request_mmio_ptr: request a pointer to an mmio
 * MemoryRegion.  If it is possible, map a RAM MemoryRegion with this pointer.
 * When the device wants to invalidate the pointer it will call
 * memory_region_invalidate_mmio_ptr.
 *
 * @mr: #MemoryRegion to check
 * @addr: address within that region
 *
 * Returns true on success, false otherwise.
 */
bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr);
/**
 * memory_region_invalidate_mmio_ptr: invalidate the pointer to an mmio
 * previously requested.
 * In the end that means that if something wants to execute from this area it
 * will need to request the pointer again.
 *
 * @mr: #MemoryRegion associated to the pointer.
 * @offset: offset within that region
 * @size: size of that area.
 */
void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size);
/**
 * memory_region_dispatch_read: perform a read directly to the specified
 *                              MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 *                               MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs);
/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        purposes.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
/**
 * address_space_init_shareable: return an address space for a memory region,
 *                               creating it if it does not already exist
 *
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        purposes.
 *
 * This function will return a pointer to an existing AddressSpace
 * which was initialized with the specified MemoryRegion, or it will
 * create and initialize one if it does not already exist.  The ASes
 * are reference-counted, so the memory will be freed automatically
 * when the AddressSpace is destroyed via address_space_destroy.
 */
AddressSpace *address_space_init_shareable(MemoryRegion *root,
                                           const char *name);
/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address space
 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
 * as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);
/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write);
/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len);
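
/*
 * Example (illustrative sketch, not part of the original header): a device
 * performing DMA into guest memory through its own address space.  The
 * dma_as, dma_addr, buf and len variables are placeholders.
 *
 *   if (address_space_write(&dma_as, dma_addr, MEMTXATTRS_UNSPECIFIED,
 *                           buf, len) != MEMTX_OK) {
 *       qemu_log("DMA write failed\n");
 *   }
 */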
/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as the guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);
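
/*
 * Example (illustrative sketch, not part of the original header): reading and
 * updating a little-endian 32-bit descriptor field in guest memory.  The
 * desc_addr value is a placeholder.
 *
 *   MemTxResult res;
 *   uint32_t flags;
 *
 *   flags = address_space_ldl_le(&address_space_memory, desc_addr,
 *                                MEMTXATTRS_UNSPECIFIED, &res);
 *   if (res == MEMTX_OK) {
 *       address_space_stl_le(&address_space_memory, desc_addr,
 *                            flags | 1, MEMTXATTRS_UNSPECIFIED, NULL);
 *   }
 */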
uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
struct MemoryRegionCache {
    hwaddr xlat;
    hwaddr len;
    AddressSpace *as;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len.  On failure, return a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations.  In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);
/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 * address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);
/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
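
/* Lifecycle sketch (illustrative only; "as", "desc_gpa" and "desc_len" are
 * hypothetical values, not part of this API).  A read-modify-write of a
 * guest structure through a cache could look like:
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t mapped = address_space_cache_init(&cache, as, desc_gpa,
 *                                               desc_len, true);
 *     if (mapped >= desc_len) {
 *         uint16_t flags = address_space_lduw_le_cached(&cache, 0,
 *                                                       MEMTXATTRS_UNSPECIFIED,
 *                                                       NULL);
 *         address_space_stw_le_cached(&cache, 0, flags | 0x1,
 *                                     MEMTXATTRS_UNSPECIFIED, NULL);
 *         address_space_cache_invalidate(&cache, 0, sizeof(flags));
 *     }
 *     address_space_cache_destroy(&cache);
 */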
/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address.  The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as the guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */
uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
                                   MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry.  Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write);
/* address_space_translate: translate an address range in an address space
 * into a MemoryRegion and an address range within that region.  Should be
 * called from an RCU critical section, to avoid that the last reference
 * to the returned region disappears after address_space_translate returns.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 * #MemoryRegion
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *len,
                                      bool is_write);
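
/* Usage sketch (illustrative only; "as", "gpa" and "size" are hypothetical).
 * Resolve a guest address to a region while holding the RCU read lock so the
 * region cannot disappear underneath the caller:
 *
 *     hwaddr xlat, len = size;
 *     rcu_read_lock();
 *     MemoryRegion *mr = address_space_translate(as, gpa, &xlat, &len, false);
 *     if (memory_access_is_direct(mr, false)) {
 *         void *host = qemu_map_ram_ptr(mr->ram_block, xlat);
 *         // up to "len" bytes may be read directly through "host"
 *     }
 *     rcu_read_unlock();
 */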
/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
                                bool is_write);
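
/* Usage sketch (illustrative only; "as", "dma_gpa" and "buf" are hypothetical).
 * Check a page-aligned range before attempting a DMA-style write:
 *
 *     if (address_space_access_valid(as, dma_gpa, 4096, true)) {
 *         address_space_write(as, dma_gpa, MEMTXATTRS_UNSPECIFIED, buf, 4096);
 *     } else {
 *         // report a DMA error instead of touching unassigned memory
 *     }
 */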
/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);
/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);
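
/* Usage sketch (illustrative only; "as", "gpa" and "size" are hypothetical).
 * Map a guest range for writing, fill it directly from host code, then unmap
 * it so the written bytes are marked dirty:
 *
 *     hwaddr plen = size;
 *     void *host = address_space_map(as, gpa, &plen, true);
 *     if (host) {
 *         memset(host, 0, plen);   // plen may be smaller than requested
 *         address_space_unmap(as, host, plen, true, plen);
 *     }
 */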
/* Internal functions, part of the implementation of address_space_read.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr);
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) &&
               !mr->readonly && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}
/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;

    if (__builtin_constant_p(len)) {
        if (len) {
            rcu_read_lock();
            l = len;
            mr = address_space_translate(as, addr, &addr1, &l, false);
            if (len == l && memory_access_is_direct(mr, false)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = address_space_read_continue(as, addr, attrs, buf, len,
                                                     addr1, l, mr);
            }
            rcu_read_unlock();
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}
/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED,
                       buf, len);
}
/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED,
                        buf, len);
}
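
/* Usage sketch (illustrative only; "cache" is assumed to have been set up
 * with address_space_cache_init over at least 16 bytes).  Bulk accesses
 * through the cache avoid repeating the translation on every call:
 *
 *     uint8_t hdr[16];
 *     address_space_read_cached(&cache, 0, hdr, sizeof(hdr));
 *     hdr[0] |= 0x1;
 *     address_space_write_cached(&cache, 0, hdr, sizeof(hdr));
 *     address_space_cache_invalidate(&cache, 0, sizeof(hdr));
 */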