qemu/ar7.git: include/exec/ram_addr.h

/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

struct RAMBlock {
    struct rcu_head rcu;
    struct MemoryRegion *mr;
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t used_length;
    ram_addr_t max_length;
    void (*resized)(const char*, uint64_t length, void *host);
    uint32_t flags;
    /* Protected by iothread lock.  */
    char idstr[256];
    /* RCU-enabled, writes protected by the ramlist lock */
    QLIST_ENTRY(RAMBlock) next;
    int fd;
};

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return (b && b->host && offset < b->used_length) ? true : false;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}

/* The dirty memory bitmap is split into fixed-size blocks to allow growth
 * under RCU.  The bitmap for a block can be accessed as follows:
 *
 *   rcu_read_lock();
 *
 *   DirtyMemoryBlocks *blocks =
 *       atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
 *
 *   ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
 *   unsigned long *block = blocks->blocks[idx];
 *   ...access block bitmap...
 *
 *   rcu_read_unlock();
 *
 * Remember to check for the end of the block when accessing a range of
 * addresses.  Move on to the next block if you reach the end.
 *
 * Organization into blocks allows dirty memory to grow (but not shrink) under
 * RCU.  When adding new RAMBlocks requires the dirty memory to grow, a new
 * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
 * the same.  Other threads can safely access existing blocks while dirty
 * memory is being grown.  When no threads are using the old DirtyMemoryBlocks
 * anymore it is freed by RCU (but the underlying blocks stay because they are
 * pointed to from the new DirtyMemoryBlocks).
 */
#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)

typedef struct {
    struct rcu_head rcu;
    unsigned long *blocks[];
} DirtyMemoryBlocks;

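/*
 * Illustrative sketch (not upstream code; assumes caller-supplied "start"
 * and "length"): walking a page range that may cross a
 * DIRTY_MEMORY_BLOCK_SIZE boundary, as the comment above recommends.  The
 * inline helpers below follow this same idx/offset/base pattern.
 *
 *   unsigned long page   = start >> TARGET_PAGE_BITS;
 *   unsigned long end    = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
 *   unsigned long idx    = page / DIRTY_MEMORY_BLOCK_SIZE;
 *   unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
 *   unsigned long base   = page - offset;
 *
 *   while (page < end) {
 *       unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
 *       ...operate on bits [offset, next - base) of blocks->blocks[idx]...
 *       page = next;
 *       idx++;
 *       offset = 0;
 *       base += DIRTY_MEMORY_BLOCK_SIZE;
 *   }
 */
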
typedef struct RAMList {
    QemuMutex mutex;
    RAMBlock *mru_block;
    /* RCU-enabled, writes protected by the ramlist lock. */
    QLIST_HEAD(, RAMBlock) blocks;
    DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
    uint32_t version;
} RAMList;
extern RAMList ram_list;

ram_addr_t last_ram_offset(void);
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);

RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

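/*
 * Illustrative sketch (not upstream code): the "mask" argument of
 * cpu_physical_memory_set_dirty_range() below is a bitmap of client bits,
 * so a caller that wants to dirty a range for every client except the TCG
 * code client could write:
 *
 *   cpu_physical_memory_set_dirty_range(addr, size, DIRTY_CLIENTS_NOCODE);
 */
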
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = true;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

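/*
 * Illustrative sketch (not upstream code; "dirty_log_mask" is a hypothetical
 * local): a caller typically narrows a dirty-log mask to the clients that
 * still have at least one clean page in the range before dirtying it:
 *
 *   dirty_log_mask = cpu_physical_memory_range_includes_clean(addr, length,
 *                                                             dirty_log_mask);
 *   cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
 */
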
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);

    rcu_read_unlock();
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
    }

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                              offset, next - page);
        }
        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                              offset, next - page);
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    rcu_read_unlock();

    xen_modified_memory(start, length);
}

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
        }

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
                if (tcg_enabled()) {
                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
                }
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();

        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                   TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}

static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);

        rcu_read_lock();

        src = atomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = atomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        rcu_read_unlock();
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}

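/*
 * Illustrative sketch (not upstream code; "bmap" and "block" are hypothetical
 * caller-side names): a migration-style user with its own destination bitmap
 * can accumulate the count of newly dirtied pages for one RAMBlock like this:
 *
 *   uint64_t new_dirty_pages =
 *       cpu_physical_memory_sync_dirty_bitmap(bmap, block->offset,
 *                                             block->used_length);
 */
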
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif
#endif