/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                           TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

extern bool global_dirty_log;

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind memory */
#define RAM_PMEM (1 << 5)
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
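
/*
 * Example (illustrative sketch, not part of the API): a device that wants
 * to track IOMMU mappings would initialize a notifier with a callback and
 * register it against the IOMMU memory region. The callback and variable
 * names below are hypothetical.
 *
 *   static void my_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *   {
 *       // entry->perm is IOMMU_NONE here, since we only asked for UNMAP
 *       // events; invalidate any mapping cached for entry->iova.
 *   }
 *
 *   IOMMUNotifier n;
 *   iommu_notifier_init(&n, my_iommu_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                       0, HWADDR_MAX, 0);
 *   memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n,
 *                                         &error_fatal);
 */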
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
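
/*
 * Example (illustrative sketch): a minimal MemoryRegionOps for a
 * hypothetical device with 4-byte registers. The handler names, device
 * state type, and register layout are invented for illustration.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;          // hypothetical device state
 *       return s->regs[addr >> 2];       // addr is relative to the region
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                           unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,
 *       .valid.max_access_size = 4,
 *   };
 */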
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/**
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *   @translate takes an input address and an IOMMU index
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
typedef struct IOMMUMemoryRegionClass {
    /* private */
    MemoryRegionClass parent_class;

    /*
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     * @hwaddr: address to be translated within the memory region
     * @flag: requested access permissions
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /* Returns minimum supported page size in bytes.
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     * @old_flags: events which previously needed to be notified
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. On failure, an error object must be set
     * via @errp.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /* Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /* Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attr: attribute being queried
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /* Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /* Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;
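
/*
 * Example (illustrative sketch): the skeleton of an IOMMU implementation
 * providing only the mandatory @translate method. The type names, state,
 * and identity-mapping logic are hypothetical; address_space_memory is
 * assumed to be the intended target address space.
 *
 *   static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                           hwaddr addr,
 *                                           IOMMUAccessFlags flag,
 *                                           int iommu_idx)
 *   {
 *       // Identity-map everything, 4K pages, read-write.
 *       return (IOMMUTLBEntry) {
 *           .target_as = &address_space_memory,
 *           .iova = addr & ~(hwaddr)0xfff,
 *           .translated_addr = addr & ~(hwaddr)0xfff,
 *           .addr_mask = 0xfff,
 *           .perm = IOMMU_RW,
 *       };
 *   }
 *
 *   static void my_iommu_class_init(ObjectClass *klass, void *data)
 *   {
 *       IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
 *       imrc->translate = my_iommu_translate;
 *   }
 */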
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*log_global_after_sync)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
};

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
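
/*
 * Example (illustrative sketch): wiring the hypothetical mydev_ops from the
 * MemoryRegionOps sketch above into a device realize function. The device
 * type, QOM cast macro, and field names are invented for illustration.
 *
 *   static void mydev_realize(DeviceState *dev, Error **errp)
 *   {
 *       MyDevState *s = MYDEV(dev);    // hypothetical QOM cast macro
 *
 *       memory_region_init_io(&s->iomem, OBJECT(dev), &mydev_ops, s,
 *                             "mydev-regs", 0x100);
 *       sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
 *   }
 */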
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is that part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: Memory region features:
 *             - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
 *             - RAM_PMEM: the memory is persistent memory
 *             Other bits are ignored now.
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
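
/*
 * Example (illustrative sketch): exposing the first 1 MiB of an existing
 * RAM region at guest address 0, the way PC boards alias low RAM. The
 * variable names are hypothetical; MiB is assumed from "qemu/units.h".
 *
 *   MemoryRegion *alias = g_new0(MemoryRegion, 1);
 *
 *   memory_region_init_alias(alias, owner, "ram-below-1m", ram_mr,
 *                            0, 1 * MiB);
 *   memory_region_add_subregion(system_memory, 0, alias);
 */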
/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
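
/*
 * Example (illustrative sketch): allocating and mapping board RAM in a
 * machine init function. "system_memory" stands in for the board's root
 * container; the variable names are hypothetical.
 *
 *   MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *
 *   memory_region_init_ram(ram, NULL, "board.ram", machine->ram_size,
 *                          &error_fatal);
 *   memory_region_add_subregion(system_memory, 0, ram);
 */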
/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
        IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *   for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry);
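
/*
 * Example (illustrative sketch): an IOMMU implementation signalling that a
 * 4K mapping at "iova" was torn down. Setting .perm to IOMMU_NONE makes
 * this an UNMAP notification; the variable names and target address space
 * are hypothetical.
 *
 *   IOMMUTLBEntry entry = {
 *       .target_as = &address_space_memory,
 *       .iova = iova & ~(hwaddr)0xfff,
 *       .translated_addr = 0,
 *       .addr_mask = 0xfff,
 *       .perm = IOMMU_NONE,
 *   };
 *   memory_region_notify_iommu(iommu_mr, 0, entry);
 */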
/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that at least one of the attributes of the notifier
 * is not supported (flag/range) by the IOMMU memory region. On error,
 * an error object must be set via @errp.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * mr->iommu_ops->get_page_size().
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_do_writeback: Trigger writeback for the selected address
 * range [addr, addr + size]
 *
 * @mr: the memory region to be written back
 * @addr: the start, relative to the start of the region, of the range
 * @size: the size, in bytes, of the range
 */
void memory_region_do_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region.  This can
 * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is supported by the host
 * kernel.
 *
 * @mr:     the memory region to clear the dirty log upon
 * @start:  start address offset within the memory region
 * @len:    length of the memory region to clear dirty bitmap
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64bit hosts) can be copied over into the bitmap snapshot.  Which
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);
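
/*
 * Example (illustrative sketch): a display device redrawing only scanlines
 * that were dirtied since the last refresh. The surface geometry and
 * variable names are hypothetical.
 *
 *   DirtyBitmapSnapshot *snap;
 *   int y;
 *
 *   snap = memory_region_snapshot_and_clear_dirty(vram_mr, 0,
 *                                                 height * stride,
 *                                                 DIRTY_MEMORY_VGA);
 *   for (y = 0; y < height; y++) {
 *       if (memory_region_snapshot_get_dirty(vram_mr, snap,
 *                                            y * stride, stride)) {
 *           // redraw scanline y
 *       }
 *   }
 *   g_free(snap);
 */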
/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request).  In this case, the device model implementing the access
 * handlers is responsible for synchronization of concurrency.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
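
/*
 * Example (illustrative sketch): arming a doorbell register so that a
 * 4-byte guest write of the value 1 at offset 0x40 signals an
 * EventNotifier instead of invoking the ->write callback. The register
 * offset and variable names are hypothetical.
 *
 *   EventNotifier doorbell;
 *
 *   event_notifier_init(&doorbell, 0);
 *   memory_region_add_eventfd(&s->iomem, 0x40, 4, true, 1, &doorbell);
 */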
/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
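
/*
 * Example (illustrative sketch): building a small hierarchy with a
 * container that holds RAM and a device region at fixed offsets. The
 * container size, offsets, and variable names are hypothetical.
 *
 *   MemoryRegion *container = g_new0(MemoryRegion, 1);
 *
 *   memory_region_init(container, owner, "soc", 0x10000000);
 *   memory_region_add_subregion(container, 0x00000000, ram_mr);
 *   memory_region_add_subregion(container, 0x08000000, &s->iomem);
 */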
1566 * memory_region_add_subregion_overlap: Add a subregion to a container
1567 * with overlap.
1569 * Adds a subregion at @offset. The subregion may overlap with other
1570 * subregions. Conflicts are resolved by having a higher @priority hide a
1571 * lower @priority. Subregions without priority are taken as @priority 0.
1572 * A region may only be added once as a subregion (unless removed with
1573 * memory_region_del_subregion()); use memory_region_init_alias() if you
1574 * want a region to be a subregion in multiple locations.
1576 * @mr: the region to contain the new subregion; must be a container
1577 * initialized with memory_region_init().
1578 * @offset: the offset relative to @mr where @subregion is added.
1579 * @subregion: the subregion to be added.
1580 * @priority: used for resolving overlaps; highest priority wins.
1582 void memory_region_add_subregion_overlap(MemoryRegion *mr,
1583 hwaddr offset,
1584 MemoryRegion *subregion,
1585 int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() has changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/*
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/*
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/*
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * .@size = 0 iff no overlap was found
 * .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 * .@offset_within_address_space >= @addr
 * .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);
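
/*
 * Example (a sketch): probe whether anything is mapped at @addr below
 * @root, then drop the reference that memory_region_find() takes on
 * the returned region.
 *
 *     MemoryRegionSection section = memory_region_find(root, addr, 4);
 *
 *     if (section.mr) {
 *         ...
 *         memory_region_unref(section.mr);
 *     }
 */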

/*
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/*
 * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
 * This function must be called after the dirty log bitmap is cleared, and
 * before dirty guest memory pages are read.  If you are using
 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
 * care of doing this.
 */
void memory_global_after_dirty_log_sync(void);

/*
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/*
 * memory_region_transaction_commit: Commit a transaction and make changes
 * visible to the guest.
 */
void memory_region_transaction_commit(void);
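
/*
 * Example (a sketch; bank0/bank1 are hypothetical regions): batch two
 * topology updates so the guest observes them atomically.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, 0x4000);
 *     memory_region_transaction_commit();
 */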

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
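
/*
 * Example (a sketch; MemoryListener has several optional callbacks, only
 * .region_add is shown here and its body is elided):
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */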

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(bool flatview, bool dispatch_tree, bool owner);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs);
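
/*
 * Example (a sketch): issue a 32-bit little-endian read directly to a
 * region, bypassing the usual address-space lookup.
 *
 *     uint64_t val;
 *
 *     if (memory_region_dispatch_read(mr, 0, &val, MO_32 | MO_LE,
 *                                     MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         ...
 *     }
 */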

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
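
/*
 * Example (a sketch; the name is arbitrary and get_system_memory() is
 * declared elsewhere): give a device its own DMA view rooted at the
 * global system memory region.
 *
 *     AddressSpace as;
 *
 *     address_space_init(&as, get_system_memory(), "my-device-dma");
 */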

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address space
 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
 * as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_remove_listeners: unregister all listeners of an address space
 *
 * Removes all callbacks previously registered with memory_listener_register()
 * for @as.
 *
 * @as: an initialized #AddressSpace
 */
void address_space_remove_listeners(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             hwaddr len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, hwaddr len);
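
/*
 * Example (a sketch; @gpa and the buffer contents are made up): DMA a
 * buffer into guest memory and check the transaction result for
 * unassigned memory, a device error, or an IOMMU fault.
 *
 *     uint8_t buf[64];
 *
 *     if (address_space_write(&as, gpa, MEMTXATTRS_UNSPECIFIED,
 *                             buf, sizeof(buf)) != MEMTX_OK) {
 *         ...
 *     }
 */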

/**
 * address_space_write_rom: write to address space, including ROM.
 *
 * This function writes to the specified address space, but will
 * write data to both ROM and RAM.  This is used for non-guest
 * writes like writes from the gdb debug stub or initial loading
 * of ROM contents.
 *
 * Note that portions of the write which attempt to write data to
 * a device will be silently ignored -- only real RAM and ROM will
 * be written to.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const uint8_t *buf, hwaddr len);

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */

#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#include "exec/memory_ldst.inc.h"

#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#include "exec/memory_ldst_phys.inc.h"
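
/*
 * The included templates expand to accessors such as
 * address_space_ldl_le() and address_space_stl_le().  A sketch of a
 * 32-bit little-endian load (the guest address is made up):
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(&as, 0x1000,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 */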

struct MemoryRegionCache {
    void *ptr;
    hwaddr xlat;
    hwaddr len;
    FlatView *fv;
    MemoryRegionSection mrs;
    bool is_write;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })

/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address.  The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */

#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#include "exec/memory_ldst.inc.h"

/* Inline fast path for direct RAM access.  */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        return ldub_p(cache->ptr + addr);
    } else {
        return address_space_ldub_cached_slow(cache, addr, attrs, result);
    }
}

static inline void address_space_stb_cached(MemoryRegionCache *cache,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        stb_p(cache->ptr + addr, val);
    } else {
        address_space_stb_cached_slow(cache, addr, val, attrs, result);
    }
}

#define ENDIANNESS _le
#include "exec/memory_ldst_cached.inc.h"

#define ENDIANNESS _be
#include "exec/memory_ldst_cached.inc.h"

#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#include "exec/memory_ldst_phys.inc.h"

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len.  On failure, return a negative
 * errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations.  In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to address_space_cache_init().
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
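
/*
 * Example (a sketch; the ring address and field layout are made up):
 * cache a page of guest RAM that is accessed repeatedly, read a 16-bit
 * field from it, and release the mapping when done.
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     uint16_t idx;
 *
 *     if (address_space_cache_init(&cache, &as, ring_gpa, 0x1000, false) >= 0) {
 *         address_space_read_cached(&cache, 2, &idx, sizeof(idx));
 *         ...
 *         address_space_cache_destroy(&cache);
 *     }
 */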

/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry.  Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs);

/* address_space_translate: translate an address range within an address
 * space into a MemoryRegion and an address range within that region.
 * Should be called from an RCU critical section, to avoid that the last
 * reference to the returned region disappears after address_space_translate
 * returns.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 * #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write,
                                                    MemTxAttrs attrs)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write, attrs);
}
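
/*
 * Example (a sketch; @gpa and @size are made up): translate inside an
 * RCU critical section so the last reference to the returned region
 * cannot go away while it is in use.
 *
 *     hwaddr xlat, len = size;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(&as, gpa, &xlat, &len, false,
 *                                  MEMTXATTRS_UNSPECIFIED);
 *     ...
 *     rcu_read_unlock();
 */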

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write, MemTxAttrs attrs);

/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);
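
/*
 * Example (a sketch; @gpa and @len are made up): map a guest buffer for
 * writing, fill it, then unmap and report how much was actually
 * written.  Note that @plen may come back smaller than the requested
 * length.
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(&as, gpa, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *
 *     if (p) {
 *         memset(p, 0, plen);
 *         address_space_unmap(&as, p, plen, true, plen);
 *     }
 */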

/* Internal functions, part of the implementation of address_space_read.  */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, uint8_t *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached.  */
void address_space_read_cached_slow(MemoryRegionCache *cache,
                                    hwaddr addr, void *buf, hwaddr len);
void address_space_write_cached_slow(MemoryRegionCache *cache,
                                     hwaddr addr, const void *buf, hwaddr len);

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) &&
               !mr->readonly && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).  Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, uint8_t *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            rcu_read_lock();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
            rcu_read_unlock();
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
    } else {
        address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
    } else {
        address_space_write_cached_slow(cache, addr, buf, len);
    }
}

#ifdef NEED_CPU_H
/* enum device_endian to MemOp.  */
static inline MemOp devend_memop(enum device_endian end)
{
    QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
                      DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    /* Swap if non-host endianness or native (target) endianness */
    return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
    const int non_host_endianness =
        DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;

    /* In this case, native (target) endianness needs no swap.  */
    return (end == non_host_endianness) ? MO_BSWAP : 0;
#endif
}
#endif

#endif

#endif