/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);
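
/*
 * Return true if any page in [start, start + length) is dirty for the
 * given dirty memory client (DIRTY_MEMORY_VGA, _CODE or _MIGRATION).
 */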
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}
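
/*
 * Return true if any page in [start, start + length) is still clean
 * (i.e. not dirty) for the given client.
 */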
static inline bool cpu_physical_memory_get_clean(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}
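
/* Test the dirty bit of a single page for the given client. */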
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
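
/*
 * A page is reported as clean unless it is currently dirty for all
 * three clients (VGA, CODE and MIGRATION) at once.
 */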
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}
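
/*
 * Return true if at least one client still sees a clean page somewhere
 * in [start, start + length).
 */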
static inline bool cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                            ram_addr_t length)
{
    bool vga = cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_clean(start, length, DIRTY_MEMORY_MIGRATION);
    return vga || code || migration;
}
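
/* Mark a single page dirty for the given client. */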
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}
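
/*
 * Mark a range of pages dirty for the MIGRATION and VGA clients only,
 * leaving the CODE bitmap untouched.
 */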
static inline void cpu_physical_memory_set_dirty_range_nocode(ram_addr_t start,
                                                              ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
}
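
/*
 * Mark a range of pages dirty for all clients and notify Xen of the
 * modified range.
 */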
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
    xen_modified_memory(start, length);
}

#if !defined(_WIN32)
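/*
 * Merge a little-endian dirty page bitmap (e.g. a dirty log retrieved
 * from KVM) covering @pages pages starting at @start into the global
 * dirty memory bitmaps.  When the start address is word-aligned and the
 * host and target page sizes match, whole words are OR-ed in at once;
 * otherwise the bitmap is walked bit by bit.
 */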
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
            }
        }
        xen_modified_memory(start, pages);
    } else {
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                                        TARGET_PAGE_SIZE * hpratio);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
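
/* Clear the dirty bits of a range of pages for a single client. */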
static inline void cpu_physical_memory_clear_dirty_range_type(ram_addr_t start,
                                                              ram_addr_t length,
                                                              unsigned client)
{
    unsigned long end, page;

    assert(client < DIRTY_MEMORY_NUM);
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_clear(ram_list.dirty_memory[client], page, end - page);
}
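
/* Clear the dirty bits of a range of pages for all three clients. */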
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_clear_dirty_range_type(start, length, DIRTY_MEMORY_CODE);
}
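
/*
 * Clear dirty state for the given client over [start, start + length);
 * non-inline helper, implemented in exec.c.
 */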
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client);

#endif
#endif