/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/* This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
struct RAMBlock {
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
};
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
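
/*
 * Illustrative sketch (not part of this header): a caller that already holds
 * a RAMBlock can turn a guest RAM offset into a host pointer with the helpers
 * above, e.g.
 *
 *     if (offset_in_ramblock(block, offset)) {
 *         void *hva = ramblock_ptr(block, offset);
 *         memset(hva, 0, TARGET_PAGE_SIZE);
 *     }
 *
 * The block is assumed to be kept alive by the caller, e.g. by an RCU
 * read-side critical section around the lookup that produced it.
 */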
/* The dirty memory bitmap is split into fixed-size blocks to allow growth
 * under RCU.  The bitmap for a block can be accessed as follows:
 *
 *    DirtyMemoryBlocks *blocks =
 *        atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
 *
 *    ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
 *    unsigned long *block = blocks->blocks[idx];
 *    ...access block bitmap...
 *
 * Remember to check for the end of the block when accessing a range of
 * addresses.  Move on to the next block if you reach the end.
 *
 * Organization into blocks allows dirty memory to grow (but not shrink) under
 * RCU.  When adding new RAMBlocks requires the dirty memory to grow, a new
 * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
 * the same.  Other threads can safely access existing blocks while dirty
 * memory is being grown.  When no threads are using the old DirtyMemoryBlocks
 * anymore it is freed by RCU (but the underlying blocks stay because they are
 * pointed to from the new DirtyMemoryBlocks).
 */
#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
typedef struct {
    struct rcu_head rcu;
    unsigned long *blocks[];
} DirtyMemoryBlocks;
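
/*
 * Illustrative sketch (not part of this header): testing a single page for
 * one client, with the RCU read lock held around the dirty_memory access as
 * described above.
 *
 *     unsigned long page = addr >> TARGET_PAGE_BITS;
 *     bool dirty;
 *
 *     rcu_read_lock();
 *     blocks = atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_VGA]);
 *     dirty = test_bit(page % DIRTY_MEMORY_BLOCK_SIZE,
 *                      blocks->blocks[page / DIRTY_MEMORY_BLOCK_SIZE]);
 *     rcu_read_unlock();
 *
 * The inline helpers below implement this pattern and also handle ranges that
 * cross DIRTY_MEMORY_BLOCK_SIZE boundaries.
 */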
typedef struct RAMList {
    QemuMutex mutex;
    /* RCU-enabled, writes protected by the ramlist lock. */
    QLIST_HEAD(, RAMBlock) blocks;
    DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
} RAMList;
extern RAMList ram_list;
ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);
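
/*
 * Illustrative sketch (not part of this header): readers walk the block list
 * under RCU, while writers take the ramlist lock around any change to
 * ram_list.blocks.
 *
 *     RAMBlock *block;
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         ... inspect block->idstr, block->used_length, block->host ...
 *     }
 *     rcu_read_unlock();
 */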
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void qemu_set_ram_fd(ram_addr_t addr, int fd);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);
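
/*
 * Illustrative sketch (not part of this header; error handling is elided and
 * the callback name is hypothetical): a backend that wants RAM able to grow
 * later pairs qemu_ram_alloc_resizeable() with qemu_ram_resize().
 *
 *     RAMBlock *rb = qemu_ram_alloc_resizeable(16 * 1024 * 1024,
 *                                              64 * 1024 * 1024,
 *                                              my_resized_cb, mr, &err);
 *     ...
 *     qemu_ram_resize(rb->offset, 32 * 1024 * 1024, &err);
 *
 * my_resized_cb(const char *id, uint64_t new_length, void *host) is invoked
 * once the block's used_length has changed.
 */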
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
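
/*
 * Illustrative sketch: DIRTY_CLIENTS_ALL marks a write for every client,
 * including DIRTY_MEMORY_CODE so that TCG can invalidate translations;
 * DIRTY_CLIENTS_NOCODE is for callers that have already dealt with (or do
 * not need) code invalidation, e.g.
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 */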
/* Returns true if any page in [start, start + length) is dirty for @client. */
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);

        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
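
/*
 * Illustrative sketch (not part of this header): a display model could poll
 * its framebuffer for updates with
 *
 *     if (cpu_physical_memory_get_dirty(fb_addr, fb_size, DIRTY_MEMORY_VGA)) {
 *         ... redraw, then clear the VGA dirty bits for that range ...
 *     }
 *
 * In practice such callers go through the memory API wrappers rather than
 * using this header directly.
 */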
/* Returns true if every page in [start, start + length) is dirty for @client. */
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);

        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}
/* Returns a mask of the clients in @mask for which [start, start + length)
 * still contains at least one clean page.
 */
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
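
/*
 * Illustrative sketch (not part of this header): before a bulk write, a
 * caller can check which clients still have clean pages in the range and
 * skip the bitmap update entirely when everything is already dirty:
 *
 *     uint8_t dirty_log_mask =
 *         cpu_physical_memory_range_includes_clean(addr, len,
 *                                                  DIRTY_CLIENTS_NOCODE);
 *     if (dirty_log_mask) {
 *         cpu_physical_memory_set_dirty_range(addr, len, dirty_log_mask);
 *     }
 */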
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}
/* Marks [start, start + length) dirty for every client selected by @mask. */
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_modified_memory(start, length);
}
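
/*
 * Illustrative sketch (not part of this header): a DMA completion path that
 * has written @len bytes of guest RAM at @addr marks the range dirty for
 * migration and the display, after any translated code in the range has
 * already been invalidated:
 *
 *     cpu_physical_memory_set_dirty_range(addr, len, DIRTY_CLIENTS_NOCODE);
 */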
#if !defined(_WIN32)
/* Feed a little-endian dirty bitmap (e.g. from KVM's dirty log) covering
 * @pages pages starting at @start into the per-client dirty bitmaps.
 */
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    ram_addr_t addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
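
/*
 * Illustrative sketch (not part of this header; the slot fields are
 * hypothetical): a KVM dirty-log sync hands the little-endian bitmap
 * returned by the kernel straight to the helper above:
 *
 *     cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap,
 *                                            slot->ram_start_offset,
 *                                            slot->memory_size >> TARGET_PAGE_BITS);
 */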
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
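
/*
 * Illustrative sketch (not part of this header): when a RAM range is being
 * discarded or reset, its dirty bits can be dropped for every client in one
 * call:
 *
 *     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
 */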
/* Transfer dirty bits for the MIGRATION client in [start, start + length)
 * into @dest, clearing them in the global bitmap; returns the number of bits
 * that were newly set in @dest.
 */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif /* !CONFIG_USER_ONLY */