/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
#ifndef CONFIG_USER_ONLY
#include "cpu.h"
#include "hw/xen/xen.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;               /* RAM contents, as mapped into the host */
    uint8_t *colo_cache;         /* For colo, VM's ram cache */
    ram_addr_t offset;           /* start of the block in the ram_addr_t space */
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock. */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /*
     * Bitmap of pages that haven't been sent even once.  Currently only
     * maintained and used in postcopy, where it is used to send the
     * dirtymap at the start of the postcopy phase.
     */
    unsigned long *unsentmap;
    /* bitmap of already received pages in postcopy */
    unsigned long *receivedmap;

    /*
     * Bitmap to track already cleared dirty bitmap.  When a bit is set,
     * it means the corresponding memory chunk needs a log-clear.
     * Set this to non-NULL to enable postponing and splitting the
     * clearing of the dirty bitmap on the remote node (e.g., KVM).
     * The bitmap will be set only when doing a global sync.
     *
     * NOTE: this bitmap is different from the other bitmaps in that one
     * bit can represent multiple guest pages (as decided by the
     * `clear_bmap_shift' field below).  On the destination side this
     * should always be NULL, and `clear_bmap_shift' is meaningless.
     */
    unsigned long *clear_bmap;
    uint8_t clear_bmap_shift;
};
/**
 * clear_bmap_size: calculate clear bitmap size
 *
 * @pages: number of guest pages
 * @shift: guest page number shift
 *
 * Returns: number of bits for the clear bitmap
 */
static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
{
    return DIV_ROUND_UP(pages, 1UL << shift);
}
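/*
 * Worked example (illustrative, not part of the original comment): with a
 * clear_bmap_shift of 18, one clear bitmap bit covers 2^18 guest pages, so
 * a block of 2^20 guest pages needs
 * clear_bmap_size(1 << 20, 18) == DIV_ROUND_UP(1 << 20, 1 << 18) == 4 bits.
 */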
/**
 * clear_bmap_set: set clear bitmap for the page range
 *
 * @rb: the ramblock to operate on
 * @start: the start page number
 * @npages: number of pages to set in the bitmap
 */
static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
                                  uint64_t npages)
{
    uint8_t shift = rb->clear_bmap_shift;

    bitmap_set_atomic(rb->clear_bmap, start >> shift,
                      clear_bmap_size(npages, shift));
}
/**
 * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
 *
 * @rb: the ramblock to operate on
 * @page: the page number to check
 *
 * Returns: true if the bit was set, false otherwise
 */
static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
    uint8_t shift = rb->clear_bmap_shift;

    return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
}
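/*
 * Typical pairing (illustrative sketch, not part of this header): the dirty
 * bitmap sync code below marks chunks with clear_bmap_set(), and the sender
 * later performs the real log-clear lazily, roughly:
 *
 *     if (rb->clear_bmap && clear_bmap_test_and_clear(rb, chunk)) {
 *         memory_region_clear_dirty_bitmap(rb->mr, chunk_start, chunk_len);
 *     }
 *
 * where chunk_start/chunk_len describe the guest range covered by that
 * clear bitmap bit.
 */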
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length) ? true : false;
}
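/*
 * Note: the @offset taken by offset_in_ramblock() and ramblock_ptr() is
 * relative to the start of the block, not an absolute ram_addr_t; callers
 * typically compute it as (ram_addr - rb->offset).
 */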
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}
bool ramblock_is_pmem(RAMBlock *rb);

long qemu_minrampagesize(void);
long qemu_maxrampagesize(void);
/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd:  Allocate a ram block from the specified backing
 *                          file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @mr: the memory region where the ram block is
 *  @ram_flags: specify the properties of the ram block, which can be one
 *              of the following values, or a bit-wise OR of them:
 *              - RAM_SHARED: mmap the backing file or device with MAP_SHARED
 *              - RAM_PMEM: the backend @mem_path or @fd is persistent memory
 *              Other bits are ignored.
 *  @mem_path or @fd: specify the backing file or device
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   Error **errp);
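/*
 * Example (illustrative only; the backing path is a placeholder):
 *
 *     Error *local_err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr,
 *                                             RAM_SHARED | RAM_PMEM,
 *                                             "/mnt/pmem0/guest.img",
 *                                             &local_err);
 *     if (!rb) {
 *         error_report_err(local_err);
 *     }
 */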
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd,
                                 Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
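/*
 * Example (illustrative; "my_resized_cb" and the sizes are placeholders,
 * MiB comes from "qemu/units.h"): a resizeable block is created with a
 * fixed @max_size and can later be resized within that limit:
 *
 *     RAMBlock *rb = qemu_ram_alloc_resizeable(16 * MiB, 64 * MiB,
 *                                              my_resized_cb, mr, &err);
 *     qemu_ram_resize(rb, 32 * MiB, &err);
 */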
/* Dirty-client masks over the DIRTY_MEMORY_{VGA,CODE,MIGRATION} bits. */
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);

        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
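/*
 * Note: cpu_physical_memory_get_dirty() above returns true if *any* page in
 * the range is dirty for @client, while cpu_physical_memory_all_dirty()
 * below returns true only if *every* page in the range is dirty.
 */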
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);

        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
/* Returns false only if the page is dirty for every client. */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);

                if (global_dirty_log) {
                    atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
                              temp);
                }

                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;

        if (!global_dirty_log) {
            clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
        }

        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
/* Called within an RCU critical section. */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address and length are aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        if (rb->clear_bmap) {
            /*
             * Postpone the dirty bitmap clear to the point just before we
             * really send the pages; the clear-dirty procedure will also
             * be split into smaller chunks.
             */
            clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
                           length >> TARGET_PAGE_BITS);
        } else {
            /* Slow path - still do that in a huge chunk */
            memory_region_clear_dirty_bitmap(rb->mr, start, length);
        }
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {