/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
#ifndef CONFIG_USER_ONLY

#include "sysemu/xen.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"

extern uint64_t total_dirty_pages;

/**
 * clear_bmap_size: calculate clear bitmap size
 *
 * @pages: number of guest pages
 * @shift: guest page number shift
 *
 * Returns: number of bits for the clear bitmap
 */
static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
{
    return DIV_ROUND_UP(pages, 1UL << shift);
}

/**
 * clear_bmap_set: set clear bitmap for the page range
 *
 * @rb: the ramblock to operate on
 * @start: the start page number
 * @npages: number of pages to set in the bitmap
 */
static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
                                  uint64_t npages)
{
    uint8_t shift = rb->clear_bmap_shift;

    bitmap_set_atomic(rb->clear_bmap, start >> shift,
                      clear_bmap_size(npages, shift));
}

/**
 * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
 *
 * @rb: the ramblock to operate on
 * @page: the page number to check
 *
 * Returns: true if the bit was set, false otherwise
 */
static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
    uint8_t shift = rb->clear_bmap_shift;

    return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
}
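
/*
 * Illustrative note (not part of the original header): assuming a
 * clear_bmap_shift of 18, one clear bitmap bit covers 2^18 = 262144
 * guest pages, so a block of 1048576 pages needs
 * clear_bmap_size(1048576, 18) == DIV_ROUND_UP(1048576, 262144) == 4
 * bits, and clear_bmap_set()/clear_bmap_test_and_clear() index that
 * bitmap by (page >> 18).
 */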

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
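
/*
 * Illustrative note (not part of the original header): ramblock_ptr()
 * translates a RAM offset into a host virtual address, e.g. the page
 * at offset 0x2000 inside @block is backed by host memory at
 * block->host + 0x2000, provided the offset passes offset_in_ramblock().
 */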

static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
        (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}

bool ramblock_is_pmem(RAMBlock *rb);

long qemu_minrampagesize(void);
long qemu_maxrampagesize(void);

/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd:  Allocate a ram block from the specified backing
 *                          file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @mr: the memory region where the ram block is
 *  @ram_flags: RamBlock flags. Supported flags include RAM_SHARED and RAM_PMEM.
 *  @mem_path or @fd: specify the backing file or device
 *  @readonly: true to open @path for reading, false for read/write.
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   bool readonly, Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd, off_t offset,
                                 bool readonly, Error **errp);
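
/*
 * Illustrative sketch only (not part of the original header): a caller
 * backing guest RAM with a shared file might do the following, where
 * "size", "backend_mr" and the path are placeholders:
 *
 *     Error *local_err = NULL;
 *     RAMBlock *blk = qemu_ram_alloc_from_file(size, backend_mr,
 *                                              RAM_SHARED,
 *                                              "/dev/shm/guest-ram",
 *                                              false, &local_err);
 *     if (!blk) {
 *         // propagate or report local_err and fail the backend setup
 *     }
 */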

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);

/* Write back the whole block of memory to its backing store */
static inline void qemu_ram_block_writeback(RAMBlock *block)
{
    qemu_ram_msync(block, 0, block->used_length);
}

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
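
/*
 * Illustrative note (not part of the original header): assuming the
 * usual three dirty memory clients (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE,
 * DIRTY_MEMORY_MIGRATION) so that DIRTY_MEMORY_NUM == 3,
 * DIRTY_CLIENTS_ALL evaluates to 0b111 and DIRTY_CLIENTS_NOCODE masks
 * out only the DIRTY_MEMORY_CODE bit.
 */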

void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
            unsigned long num = next - base;
            unsigned long found = find_next_bit(blocks->blocks[idx],
                                                num, offset);
            if (found < num) {
                dirty = true;
                break;
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    return dirty;
}

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
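
/*
 * Illustrative note (not part of the original header): calling
 * cpu_physical_memory_range_includes_clean(start, length,
 * DIRTY_CLIENTS_ALL) returns a mask with a bit set for every client
 * whose bitmap still has at least one clean page in
 * [start, start + length); a return value of 0 means the range is
 * already fully dirty for every client that was asked about.
 */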

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        }

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                                  offset, next - page);
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    xen_hvm_modified_memory(start, length);
}

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        WITH_RCU_READ_LOCK_GUARD() {
            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
                blocks[i] =
                    qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
            }

            for (k = 0; k < nr; k++) {
                if (bitmap[k]) {
                    unsigned long temp = leul_to_cpu(bitmap[k]);

                    qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);

                    if (global_dirty_tracking) {
                        qatomic_or(
                                &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
                                temp);
                        if (unlikely(
                            global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
                            total_dirty_pages += ctpopl(temp);
                        }
                    }

                    if (tcg_enabled()) {
                        qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
                                   temp);
                    }
                }

                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                    offset = 0;
                    idx++;
                }
            }
        }

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;

        if (!global_dirty_tracking) {
            clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
        }

        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
                    total_dirty_pages += ctpopl(c);
                }
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
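
/*
 * Illustrative note (not part of the original header): in
 * cpu_physical_memory_set_dirty_lebitmap() above, hpratio accounts for
 * host pages larger than target pages, e.g. a 64K host page size with
 * a 4K TARGET_PAGE_SIZE gives hpratio == 16, so each set bit in the
 * host-provided bitmap dirties 16 consecutive target pages
 * (TARGET_PAGE_SIZE * hpratio bytes) starting at the corresponding
 * ram_addr.
 */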

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

/* Called with RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address and length are aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        src = qatomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        if (rb->clear_bmap) {
            /*
             * Postpone the dirty bitmap clear to the point before we
             * really send the pages, also we will split the clear
             * dirty procedure into smaller chunks.
             */
            clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
                           length >> TARGET_PAGE_BITS);
        } else {
            /* Slow path - still do that in a huge chunk */
            memory_region_clear_dirty_bitmap(rb->mr, start, length);
        }
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}