/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    uint8_t *colo_cache; /* For colo, VM's ram cache */
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once
     * only maintained and used in postcopy at the moment
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
    /* bitmap of already received pages in postcopy */
    unsigned long *receivedmap;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
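
/*
 * Illustrative sketch (not part of the original header): a caller that
 * wants a host pointer for a guest ram offset would normally bounds-check
 * first.  "rb", "off", "data" and "len" are hypothetical names:
 *
 *     if (offset_in_ramblock(rb, off)) {
 *         void *hva = ramblock_ptr(rb, off);
 *         memcpy(hva, data, len);
 *     }
 */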

static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}

bool ramblock_is_pmem(RAMBlock *rb);

long qemu_minrampagesize(void);
long qemu_maxrampagesize(void);

/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd:  Allocate a ram block from the specified backing
 *                          file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @mr: the memory region where the ram block is
 *  @ram_flags: specify the properties of the ram block, which can be one of,
 *              or a bit-or of, the following values:
 *              - RAM_SHARED: mmap the backing file or device with MAP_SHARED
 *              - RAM_PMEM: the backend @mem_path or @fd is persistent memory
 *              Other bits are ignored.
 *  @mem_path or @fd: specify the backing file or device
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd,
                                 Error **errp);

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
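
/*
 * Usage sketch (illustrative, not from the original header): backing a
 * memory region with a shared file.  "mr" is assumed to be an initialized
 * MemoryRegion; GiB comes from "qemu/units.h":
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(1 * GiB, mr, RAM_SHARED,
 *                                             "/dev/shm/guest-ram", &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 */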

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;

    /* Walk the bitmap one DIRTY_MEMORY_BLOCK_SIZE chunk at a time. */
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);

        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
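
/*
 * Worked example (illustrative): with DIRTY_MEMORY_BLOCK_SIZE bits per
 * block, a page number decomposes as idx = page / DIRTY_MEMORY_BLOCK_SIZE
 * (which block), offset = page % DIRTY_MEMORY_BLOCK_SIZE (bit within the
 * block) and base = page - offset (first page covered by the block).
 * E.g. assuming DIRTY_MEMORY_BLOCK_SIZE == 2097152, page 3000000 gives
 * idx 1, offset 902848 and base 2097152.
 */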

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;

    /* A single clear bit means the range is not all dirty. */
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num,
                                                 offset);

        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
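
/*
 * Illustrative use (not from the original header): the returned value is a
 * mask of the requested clients for which at least one page in the range
 * is still clean.  "fb_addr" and "fb_len" are hypothetical:
 *
 *     if (cpu_physical_memory_range_includes_clean(fb_addr, fb_len,
 *                                                  1 << DIRTY_MEMORY_VGA)) {
 *         ... some page in the range is clean for the VGA client ...
 *     }
 */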

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}
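
/*
 * Illustrative call (not from the original header): device emulation that
 * has just written "size" bytes of guest RAM at ram_addr "a" would
 * typically dirty every client except TCG's code client:
 *
 *     cpu_physical_memory_set_dirty_range(a, size, DIRTY_CLIENTS_NOCODE);
 */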

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    ram_addr_t addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
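
/*
 * Worked example (illustrative): the bitmap consumed above has one bit per
 * host page, as produced e.g. by KVM's dirty log, so hpratio scales it to
 * target pages.  With 64 KiB host pages and 4 KiB target pages, hpratio is
 * 16 and one set bit at word i, bit j dirties 16 consecutive target pages:
 *
 *     page_number = (i * HOST_LONG_BITS + j) * 16;
 *     ... TARGET_PAGE_SIZE * 16 bytes are dirtied at
 *         start + page_number * TARGET_PAGE_SIZE ...
 */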

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length,
                                             DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address and length are aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
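
/*
 * Worked example (illustrative): the word-aligned fast path above is taken
 * only when start + rb->offset begins exactly on a bitmap word and length
 * covers a whole number of words.  With 4 KiB target pages and 64-bit
 * longs, one word covers 64 * 4 KiB = 256 KiB, so e.g. start + rb->offset
 * == 512 KiB with length == 1 MiB qualifies, while length == 260 KiB falls
 * back to the page-by-page path.
 */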