include/exec/ram_addr.h

/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY. Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
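
/*
 * The inline helpers below operate on ram_list.dirty_memory[]: one bitmap
 * per dirty-memory client (DIRTY_MEMORY_VGA for display refresh,
 * DIRTY_MEMORY_CODE for TCG self-modifying-code handling,
 * DIRTY_MEMORY_MIGRATION for live migration), with one bit per
 * TARGET_PAGE_SIZE page of guest RAM.
 */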

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}
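
/*
 * Illustrative sketch only, not an existing QEMU helper: a display device
 * model could use the VGA client to decide whether a framebuffer range
 * needs to be redrawn.  The name and parameters below are invented for
 * this example.
 */
static inline bool example_fb_range_was_written(ram_addr_t fb_base,
                                                ram_addr_t fb_size)
{
    return cpu_physical_memory_get_dirty(fb_base, fb_size, DIRTY_MEMORY_VGA);
}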

static inline bool cpu_physical_memory_get_clean(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
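
/*
 * Note that "clean" in the two predicates below means "not dirty for every
 * client at once": they return true as long as at least one client has not
 * yet seen the page (or some page in the range) as dirty, i.e. writes still
 * need to go through the dirty-tracking machinery.
 */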

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline bool cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                            ram_addr_t length)
{
    bool vga = cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_MIGRATION);
    return vga || code || migration;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}
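
/*
 * Of the two range setters below, the _nocode variant skips the
 * DIRTY_MEMORY_CODE bitmap and the Xen notification; it is meant for
 * callers that have already taken care of self-modifying-code bookkeeping
 * for the range (e.g. by invalidating translated blocks).
 */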

static inline void cpu_physical_memory_set_dirty_range_nocode(ram_addr_t start,
                                                              ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
    xen_modified_memory(start, length);
}
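
/*
 * cpu_physical_memory_set_dirty_lebitmap() folds an externally produced,
 * little-endian dirty bitmap (such as the one returned by the KVM dirty
 * log ioctl) into all three client bitmaps.  When the start address is
 * word aligned and host and target page sizes match, whole longs are
 * OR-ed in directly; otherwise each set bit is expanded into a
 * cpu_physical_memory_set_dirty_range() call covering hpratio target
 * pages.
 */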

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
            }
        }
        xen_modified_memory(start, pages);
    } else {
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                                        TARGET_PAGE_SIZE * hpratio);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length,
                                                         unsigned client)
{
    unsigned long end, page;

    assert(client < DIRTY_MEMORY_NUM);
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_clear(ram_list.dirty_memory[client], page, end - page);
}
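
/*
 * Unlike the inline helpers above, cpu_physical_memory_reset_dirty() is
 * implemented in exec.c: in addition to clearing the client's bits for the
 * range, it also resets the dirty state tracked in the TCG softmmu TLBs so
 * that subsequent guest writes are trapped again.
 */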

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client);
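
/*
 * Illustrative sketch only, not an existing QEMU helper: a test-and-clear
 * pattern built from the primitives above.  The function name is invented
 * for this example; the real consumers of the migration client live in the
 * migration code.
 */
static inline bool example_test_and_clear_migration_dirty(ram_addr_t start,
                                                          ram_addr_t length)
{
    bool dirty = cpu_physical_memory_get_dirty(start, length,
                                               DIRTY_MEMORY_MIGRATION);

    if (dirty) {
        cpu_physical_memory_reset_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    }
    return dirty;
}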

#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */