/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#ifndef CONFIG_USER_ONLY

#include "qemu-common.h"
#include "cpu-common.h"
#include "qemu-queue.h"
typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionPortio MemoryRegionPortio;
typedef struct MemoryRegionMmio MemoryRegionMmio;
/* Must match *_DIRTY_FLAGS in cpu-all.h.  To be replaced with dynamic
 * registration.
 */
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 3
struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     target_phys_addr_t addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  target_phys_addr_t addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_portio may be used for
     * backwards compatibility with old portio registration
     */
    const MemoryRegionPortio *old_portio;
    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    MemoryRegion *parent;
    target_phys_addr_t addr;
    target_phys_addr_t offset;
    bool backend_registered;
    target_phys_addr_t alias_offset;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};
struct MemoryRegionPortio {
    IOPortReadFunc *read;
    IOPortWriteFunc *write;
};

#define PORTIO_END { }
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size);
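/*
 * Example (illustrative sketch): creating a container that will hold a
 * board's memory map; the "sysmem" name is hypothetical.
 *
 *     static MemoryRegion sysmem;
 *
 *     memory_region_init(&sysmem, "system", UINT64_MAX);
 */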
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
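/*
 * Example (illustrative sketch): registering the MMIO callbacks sketched
 * above as a 4 KB region; "example_ops", "example_regs" and "example_mmio"
 * are hypothetical names.
 *
 *     static MemoryRegion example_mmio;
 *     static uint32_t example_regs[4];
 *
 *     memory_region_init_io(&example_mmio, &example_ops, example_regs,
 *                           "example-mmio", 0x1000);
 */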
/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev, /* FIXME: layering violation */
                            const char *name,
                            uint64_t size);
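/*
 * Example (illustrative sketch): allocating 128 MB of guest RAM; the
 * variable names are hypothetical.
 *
 *     static MemoryRegion ram;
 *
 *     memory_region_init_ram(&ram, NULL, "board.ram", 128 * 1024 * 1024);
 */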
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev, /* FIXME: layering violation */
                                const char *name,
                                uint64_t size,
                                void *ptr);
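/*
 * Example (illustrative sketch): exposing a host buffer, e.g. one obtained
 * from mmap(), directly to the guest; the names are hypothetical.
 *
 *     static MemoryRegion shared;
 *     void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *     memory_region_init_ram_ptr(&shared, NULL, "shared-page", 4096, buf);
 */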
/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size);
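/*
 * Example (illustrative sketch): exposing the low megabyte of an existing
 * "ram" region at a second guest-physical address; names are hypothetical.
 *
 *     static MemoryRegion ram_alias;
 *
 *     memory_region_init_alias(&ram_alias, "ram-low-alias", &ram,
 *                              0, 0x100000);
 *     memory_region_add_subregion(&sysmem, 0x40000000, &ram_alias);
 */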
/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed.  May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);
/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);
/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);
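/*
 * Example (illustrative sketch): filling a RAM-backed region with a ROM
 * image from the host; "rom", "rom_data" and "rom_size" are hypothetical.
 *
 *     memory_region_init_ram(&rom, NULL, "board.rom", rom_size);
 *     memcpy(memory_region_get_ram_ptr(&rom), rom_data, rom_size);
 *     memory_region_set_readonly(&rom, true);
 */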
/**
 * memory_region_set_offset: Sets an offset to be added to MemoryRegionOps
 *                           callbacks.
 *
 * This function is deprecated and should not be used in new code.
 */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset);
/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
/**
 * memory_region_get_dirty: Check whether a page is dirty for a specified
 *                          client.
 *
 * Checks whether a page has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client);
/**
 * memory_region_set_dirty: Mark a page as dirty in a memory region.
 *
 * Marks a page as dirty, after it has been dirtied outside guest code.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr);
/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client);
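/*
 * Example (illustrative sketch): a display device finding out which pages of
 * video RAM changed since the last refresh; "vram", "vram_size" and
 * redraw_page() are hypothetical.
 *
 *     memory_region_set_log(&vram, true, DIRTY_MEMORY_VGA);
 *
 *     // once per display refresh:
 *     memory_region_sync_dirty_bitmap(&vram);
 *     for (addr = 0; addr < vram_size; addr += TARGET_PAGE_SIZE) {
 *         if (memory_region_get_dirty(&vram, addr, DIRTY_MEMORY_VGA)) {
 *             redraw_page(addr);
 *         }
 *     }
 *     memory_region_reset_dirty(&vram, 0, vram_size, DIRTY_MEMORY_VGA);
 */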
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);
/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size);
/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);
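/*
 * Example (illustrative sketch): coalescing guest writes to an MMIO region
 * whose side effects can safely be delayed; "fb_mmio" is hypothetical.
 *
 *     memory_region_set_coalescing(&fb_mmio);
 *
 *     // or, to coalesce only a sub-range of the region:
 *     memory_region_add_coalescing(&fb_mmio, 0x0, 0x10000);
 */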
/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd);
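/*
 * Example (illustrative sketch): turning guest writes of the value 0 to a
 * notification register into eventfd signals, bypassing the MMIO callbacks;
 * "example_mmio" and "notify_fd" are hypothetical, and the register offset
 * and access size are made up.
 *
 *     int notify_fd = eventfd(0, 0);
 *
 *     memory_region_add_eventfd(&example_mmio, 0x10, 2, true, 0, notify_fd);
 */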
/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd);
/**
 * memory_region_add_subregion: Add a sub-region to a container.
 *
 * Adds a sub-region at @offset.  The sub-region may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion);
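/*
 * Example (illustrative sketch): composing a board memory map inside the
 * "sysmem" container from the earlier sketches.
 *
 *     memory_region_add_subregion(&sysmem, 0x00000000, &ram);
 *     memory_region_add_subregion(&sysmem, 0x10000000, &example_mmio);
 */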
/**
 * memory_region_add_subregion_overlap: Add a sub-region to a container,
 *                                      with overlap.
 *
 * Adds a sub-region at @offset.  The sub-region may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority);
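/*
 * Example (illustrative sketch, independent of the ones above): a PCI window
 * that hides whatever RAM lies beneath it, because it is added with a higher
 * priority; "pci_window" is hypothetical.
 *
 *     memory_region_add_subregion_overlap(&sysmem, 0x00000000, &ram, 0);
 *     memory_region_add_subregion_overlap(&sysmem, 0xe0000000, &pci_window, 1);
 */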
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);
/* Start a transaction; changes will be accumulated and made visible only
 * when the transaction ends.
 */
void memory_region_transaction_begin(void);

/* Commit a transaction and make changes visible to the guest.
 */
void memory_region_transaction_commit(void);
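/*
 * Example (illustrative sketch): batching several topology changes so the
 * guest never observes an intermediate state; names are hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_del_subregion(&sysmem, &old_bank);
 *     memory_region_add_subregion(&sysmem, 0x20000000, &new_bank);
 *     memory_region_transaction_commit();
 */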