/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

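/*
 * Bit masks covering the dirty memory clients.  DIRTY_CLIENTS_NOCODE leaves
 * out the code client, whose bitmap is only updated when TCG is enabled.
 */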
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

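/* Return true if any page in [start, start + length) is dirty for @client. */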
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}

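/* Return true only if every page in [start, start + length) is dirty for @client. */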
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);

    return next >= end;
}

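/* Single-page variant of cpu_physical_memory_get_dirty(). */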
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

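/* A page is reported clean unless it is dirty for all three clients. */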
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

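/*
 * Return the subset of @mask for which the range contains at least one clean
 * page; 0 means the range is fully dirty for every requested client.
 */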
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

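/* Mark a single page dirty for @client. */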
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}

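/* Mark every page in [start, start + length) dirty for each client in @mask. */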
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        uint8_t mask)
{
    unsigned long end, page;
    unsigned long **d = ram_list.dirty_memory;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
    }
    if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
    }
    xen_modified_memory(start, length);
}

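/*
 * Fold a little-endian dirty bitmap (one bit per host page, e.g. as returned
 * by a KVM dirty log) into the per-client dirty bitmaps.  The fast path only
 * applies when @start is word aligned and host and target page sizes match.
 */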
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);
                unsigned long **d = ram_list.dirty_memory;

                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                if (tcg_enabled()) {
                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                }
            }
        }
        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

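/* Clear the dirty bits of a single client over [start, start + length). */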
static inline void cpu_physical_memory_clear_dirty_range_type(ram_addr_t start,
                                                              ram_addr_t length,
                                                              unsigned client)
{
    unsigned long end, page;

    assert(client < DIRTY_MEMORY_NUM);
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_clear(ram_list.dirty_memory[client], page, end - page);
}

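/* Clear the range for the migration, VGA and code clients at once. */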
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_CODE);
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client);

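/*
 * Move the migration dirty bits for [start, start + length) into @dest,
 * clearing them in ram_list, and return the number of pages that became
 * newly dirty in @dest.
 */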
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= src[k];
                new_dirty &= src[k];
                num_dirty += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
            }
        }
    }

    return num_dirty;
}