/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "exec/ramlist.h"
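
/* One contiguous block of guest RAM, backed by a host-side mapping. */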
struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
    int fd;
    size_t page_size;
    /* dirty bitmap used during migration */
    unsigned long *bmap;
    /* bitmap of pages that have not been sent even once; only maintained
     * and used in postcopy at the moment, where it is used to send the
     * dirty bitmap at the start of the postcopy phase
     */
    unsigned long *unsentmap;
};
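
/* Return true if @b exists, has a host mapping, and @offset is below its
 * used length. */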
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}
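
/* Return a host pointer to the byte at @offset within @block's RAM. */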
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

long qemu_getrampagesize(void);
unsigned long last_ram_page(void);
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 bool share, int fd,
                                 Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
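
/*
 * Return true if any page in [start, start + length) is dirty for @client
 * (one of DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE or DIRTY_MEMORY_MIGRATION).
 */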
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}
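
/*
 * Return true only if every page in [start, start + length) is dirty for
 * @client.
 */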
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                       unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
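
/*
 * Return true if @addr is still clean for at least one client, i.e. it is
 * not yet marked dirty for VGA, CODE and MIGRATION all at once.
 */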
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}
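
/*
 * Return the subset of @mask whose clients still have at least one clean
 * page in [start, start + length).
 */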
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}
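
/* Mark the single page containing @addr dirty for @client. */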
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                       unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}
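
/*
 * Mark every page in [start, start + length) dirty for each client set in
 * @mask, and tell Xen about the modified range when running under Xen.
 */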
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_hvm_modified_memory(start, length);
}

#if !defined(_WIN32)
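/*
 * Import a little-endian dirty bitmap covering @pages pages starting at
 * @start (for example, one fetched from a hypervisor dirty log) into the
 * global dirty bitmaps for all clients.
 */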
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...),
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
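
/*
 * Transfer the MIGRATION dirty bits for [start, start + length) within @rb
 * into the block's own bitmap (rb->bmap), clearing them from the global
 * bitmap.  *real_dirty_pages is incremented by the total number of dirty
 * pages found; the return value is the number of pages newly set in
 * rb->bmap.
 */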
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length,
                                               uint64_t *real_dirty_pages)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* start address is aligned at the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset)) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                *real_dirty_pages += ctpopl(bits);
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                *real_dirty_pages += 1;
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */