/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
22 #ifndef CONFIG_USER_ONLY
23 #include "hw/xen/xen.h"
25 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
26 bool share
, const char *mem_path
,
28 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
29 MemoryRegion
*mr
, Error
**errp
);
30 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
);
31 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t max_size
,
32 void (*resized
)(const char*,
35 MemoryRegion
*mr
, Error
**errp
);
36 int qemu_get_ram_fd(ram_addr_t addr
);
37 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
);
38 void *qemu_get_ram_ptr(ram_addr_t addr
);
39 void qemu_ram_free(ram_addr_t addr
);
40 void qemu_ram_free_from_ptr(ram_addr_t addr
);
42 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
);
44 static inline bool cpu_physical_memory_get_dirty(ram_addr_t start
,
48 unsigned long end
, page
, next
;
50 assert(client
< DIRTY_MEMORY_NUM
);
52 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
53 page
= start
>> TARGET_PAGE_BITS
;
54 next
= find_next_bit(ram_list
.dirty_memory
[client
], end
, page
);
59 static inline bool cpu_physical_memory_get_clean(ram_addr_t start
,
63 unsigned long end
, page
, next
;
65 assert(client
< DIRTY_MEMORY_NUM
);
67 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
68 page
= start
>> TARGET_PAGE_BITS
;
69 next
= find_next_zero_bit(ram_list
.dirty_memory
[client
], end
, page
);
74 static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr
,
77 return cpu_physical_memory_get_dirty(addr
, 1, client
);
80 static inline bool cpu_physical_memory_is_clean(ram_addr_t addr
)
82 bool vga
= cpu_physical_memory_get_dirty_flag(addr
, DIRTY_MEMORY_VGA
);
83 bool code
= cpu_physical_memory_get_dirty_flag(addr
, DIRTY_MEMORY_CODE
);
85 cpu_physical_memory_get_dirty_flag(addr
, DIRTY_MEMORY_MIGRATION
);
86 return !(vga
&& code
&& migration
);
89 static inline bool cpu_physical_memory_range_includes_clean(ram_addr_t start
,
92 bool vga
= cpu_physical_memory_get_clean(start
, length
, DIRTY_MEMORY_VGA
);
93 bool code
= cpu_physical_memory_get_clean(start
, length
, DIRTY_MEMORY_CODE
);
95 cpu_physical_memory_get_clean(start
, length
, DIRTY_MEMORY_MIGRATION
);
96 return vga
|| code
|| migration
;
99 static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr
,
102 assert(client
< DIRTY_MEMORY_NUM
);
103 set_bit(addr
>> TARGET_PAGE_BITS
, ram_list
.dirty_memory
[client
]);
106 static inline void cpu_physical_memory_set_dirty_range_nocode(ram_addr_t start
,
109 unsigned long end
, page
;
111 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
112 page
= start
>> TARGET_PAGE_BITS
;
113 bitmap_set(ram_list
.dirty_memory
[DIRTY_MEMORY_MIGRATION
], page
, end
- page
);
114 bitmap_set(ram_list
.dirty_memory
[DIRTY_MEMORY_VGA
], page
, end
- page
);
117 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start
,
120 unsigned long end
, page
;
122 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
123 page
= start
>> TARGET_PAGE_BITS
;
124 bitmap_set(ram_list
.dirty_memory
[DIRTY_MEMORY_MIGRATION
], page
, end
- page
);
125 bitmap_set(ram_list
.dirty_memory
[DIRTY_MEMORY_VGA
], page
, end
- page
);
126 bitmap_set(ram_list
.dirty_memory
[DIRTY_MEMORY_CODE
], page
, end
- page
);
127 xen_modified_memory(start
, length
);
131 static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap
,
136 unsigned long page_number
, c
;
139 unsigned long len
= (pages
+ HOST_LONG_BITS
- 1) / HOST_LONG_BITS
;
140 unsigned long hpratio
= getpagesize() / TARGET_PAGE_SIZE
;
141 unsigned long page
= BIT_WORD(start
>> TARGET_PAGE_BITS
);
143 /* start address is aligned at the start of a word? */
144 if ((((page
* BITS_PER_LONG
) << TARGET_PAGE_BITS
) == start
) &&
147 long nr
= BITS_TO_LONGS(pages
);
149 for (k
= 0; k
< nr
; k
++) {
151 unsigned long temp
= leul_to_cpu(bitmap
[k
]);
153 ram_list
.dirty_memory
[DIRTY_MEMORY_MIGRATION
][page
+ k
] |= temp
;
154 ram_list
.dirty_memory
[DIRTY_MEMORY_VGA
][page
+ k
] |= temp
;
155 ram_list
.dirty_memory
[DIRTY_MEMORY_CODE
][page
+ k
] |= temp
;
158 xen_modified_memory(start
, pages
);
161 * bitmap-traveling is faster than memory-traveling (for addr...)
162 * especially when most of the memory is not dirty.
164 for (i
= 0; i
< len
; i
++) {
165 if (bitmap
[i
] != 0) {
166 c
= leul_to_cpu(bitmap
[i
]);
170 page_number
= (i
* HOST_LONG_BITS
+ j
) * hpratio
;
171 addr
= page_number
* TARGET_PAGE_SIZE
;
172 ram_addr
= start
+ addr
;
173 cpu_physical_memory_set_dirty_range(ram_addr
,
174 TARGET_PAGE_SIZE
* hpratio
);
180 #endif /* not _WIN32 */
182 static inline void cpu_physical_memory_clear_dirty_range_type(ram_addr_t start
,
186 unsigned long end
, page
;
188 assert(client
< DIRTY_MEMORY_NUM
);
189 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
190 page
= start
>> TARGET_PAGE_BITS
;
191 bitmap_clear(ram_list
.dirty_memory
[client
], page
, end
- page
);
194 static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start
,
197 cpu_physical_memory_clear_dirty_range_type(start
, length
, DIRTY_MEMORY_MIGRATION
);
198 cpu_physical_memory_clear_dirty_range_type(start
, length
, DIRTY_MEMORY_VGA
);
199 cpu_physical_memory_clear_dirty_range_type(start
, length
, DIRTY_MEMORY_CODE
);
203 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t length
,