/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY. Do not include it.
 * The functions declared here will be removed soon.
 */
#ifndef CONFIG_USER_ONLY

#include "sysemu/xen.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"
/**
 * clear_bmap_size: calculate clear bitmap size
 *
 * @pages: number of guest pages
 * @shift: guest page number shift
 *
 * Returns: number of bits for the clear bitmap
 */
static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
{
    return DIV_ROUND_UP(pages, 1UL << shift);
}
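/*
 * Worked example (illustrative, not part of the original header): with
 * shift = 18, one clear-bitmap bit covers 2^18 guest pages, so for
 * pages = 0x80000 (2^19) this returns DIV_ROUND_UP(2^19, 2^18) = 2 bits.
 * A partial trailing chunk still costs a full bit.
 */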
/**
 * clear_bmap_set: set clear bitmap for the page range
 *
 * @rb: the ramblock to operate on
 * @start: the start page number
 * @npages: number of pages to set in the bitmap
 */
static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
                                  uint64_t npages)
{
    uint8_t shift = rb->clear_bmap_shift;

    bitmap_set_atomic(rb->clear_bmap, start >> shift,
                      clear_bmap_size(npages, shift));
}
/**
 * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
 *
 * @rb: the ramblock to operate on
 * @page: the page number to check
 *
 * Returns: true if the bit was set, false otherwise
 */
static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
    uint8_t shift = rb->clear_bmap_shift;

    return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
}
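/*
 * Hypothetical usage sketch (not part of this header): migration code
 * marks coarse chunks with clear_bmap_set() when it syncs the dirty
 * bitmap, then performs the real (expensive) clear lazily, right before
 * a page in the chunk is first sent, e.g.:
 *
 *     if (clear_bmap_test_and_clear(rb, page)) {
 *         memory_region_clear_dirty_bitmap(rb->mr, chunk_start, chunk_len);
 *     }
 *
 * "page", "chunk_start" and "chunk_len" are illustrative names only.
 */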
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
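/*
 * Illustrative note (hypothetical caller): ramblock_ptr() asserts on an
 * out-of-range offset, so a caller that is not sure should check first:
 *
 *     if (offset_in_ramblock(block, off)) {
 *         void *p = ramblock_ptr(block, off);
 *     }
 */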
static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}
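/*
 * Worked example (illustrative): with 4 KiB target pages
 * (TARGET_PAGE_BITS == 12), host_addr == rb->host + 8192 gives
 * host_addr_offset == 8192 and a receive-bitmap index of 8192 >> 12 == 2.
 */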
bool ramblock_is_pmem(RAMBlock *rb);

long qemu_minrampagesize(void);
long qemu_maxrampagesize(void);
/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd:  Allocate a ram block from the specified backing
 *                          file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @mr: the memory region where the ram block is
 *  @ram_flags: specify the properties of the ram block, which can be one
 *              of, or the bit-or of, the following values
 *              - RAM_SHARED: mmap the backing file or device with MAP_SHARED
 *              - RAM_PMEM: the backend @mem_path or @fd is persistent memory
 *              Other bits are ignored.
 *  @mem_path or @fd: specify the backing file or device
 *  @readonly: true to open the backing file read-only, false for read/write
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   bool readonly, Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd, off_t offset,
                                 bool readonly, Error **errp);
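/*
 * Hypothetical usage sketch (not from this header): backing a region with
 * a shared file, roughly as a memory-backend-file implementation might.
 * The path is illustrative only:
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED,
 *                                             "/dev/shm/guest-ram",
 *                                             false, &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 */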
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
/* Write the whole block of memory back to its backing store */
static inline void qemu_ram_block_writeback(RAMBlock *block)
{
    qemu_ram_msync(block, 0, block->used_length);
}
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
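/*
 * Worked values (assuming the DIRTY_MEMORY_* definitions in memory.h,
 * where DIRTY_MEMORY_CODE == 1 and DIRTY_MEMORY_NUM == 3):
 * DIRTY_CLIENTS_ALL == 0b111 and DIRTY_CLIENTS_NOCODE == 0b101, i.e.
 * every client except the TCG self-modifying-code tracker.
 */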
void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
            unsigned long num = next - base;
            unsigned long found = find_next_bit(blocks->blocks[idx],
                                                num, offset);
            if (found < num) {
                dirty = true;
                break;
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    return dirty;
}
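/*
 * Worked example of the block decomposition above (illustrative): with
 * DIRTY_MEMORY_BLOCK_SIZE == 2^21 bits, page 0x300005 splits into
 * idx == 1 (which block), offset == 0x100005 (bit within that block) and
 * base == 0x200000 (first page covered by that block).
 */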
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    return dirty;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}
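/*
 * Illustrative note (not part of the original header): a page only stops
 * being "clean" here once every client has marked it dirty, so after
 *
 *     cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
 *
 * cpu_physical_memory_is_clean(addr) can still return true if the
 * migration or code client has not marked the page dirty as well.
 */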
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        }

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                                  offset, next - page);
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    xen_hvm_modified_memory(start, length);
}
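/*
 * Hypothetical usage sketch: device emulation that writes guest memory
 * directly would dirty the range for every client, while a caller that
 * has already handled TCG code invalidation can skip the code client:
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_ALL);
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 */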
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        WITH_RCU_READ_LOCK_GUARD() {
            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
                blocks[i] =
                    qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
            }

            for (k = 0; k < nr; k++) {
                if (bitmap[k]) {
                    unsigned long temp = leul_to_cpu(bitmap[k]);

                    qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);

                    if (global_dirty_log) {
                        qatomic_or(
                            &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
                            temp);
                    }

                    if (tcg_enabled()) {
                        qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
                                   temp);
                    }
                }

                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                    offset = 0;
                    idx++;
                }
            }
        }

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;

        if (!global_dirty_log) {
            clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
        }

        /*
         * bitmap-traveling is faster than memory-traveling (for addr...),
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
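/*
 * Worked example for the hpratio handling above (illustrative): on a
 * 64 KiB-page host with 4 KiB target pages, hpratio == 16, so the
 * word-aligned fast path (which requires hpratio == 1) is skipped and
 * each set bit in the host-provided bitmap dirties 16 consecutive target
 * pages (TARGET_PAGE_SIZE * hpratio == 64 KiB).
 */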
#endif /* not _WIN32 */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
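/*
 * Note on the fast path in the function below (illustrative arithmetic):
 * with 64-bit longs and 4 KiB target pages, one bitmap word covers
 * BITS_PER_LONG << TARGET_PAGE_BITS == 64 * 4 KiB == 256 KiB of guest
 * RAM, so the word-copy fast path requires start + rb->offset and length
 * to be 256 KiB aligned; anything else falls back to the page-by-page
 * loop.
 */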
/* Called with RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address and length are aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        src = qatomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        if (rb->clear_bmap) {
            /*
             * Postpone the dirty bitmap clear to the point before we
             * really send the pages; we also split the clearing into
             * smaller chunks.
             */
            clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
                           length >> TARGET_PAGE_BITS);
        } else {
            /* Slow path - still do that in a huge chunk */
            memory_region_clear_dirty_bitmap(rb->mr, start, length);
        }
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
#endif /* CONFIG_USER_ONLY */