Merge commit 'afb63ebd0a9599312c27ecceb839a399740e00ef' into upstream-merge
[qemu-kvm.git] / exec-obsolete.h
blob286e2f75e1a55bf78d09d879e679df53274715b2
/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
19 #ifndef EXEC_OBSOLETE_H
20 #define EXEC_OBSOLETE_H
22 #ifndef WANT_EXEC_OBSOLETE
23 #error Do not include exec-obsolete.h
24 #endif
26 #ifndef CONFIG_USER_ONLY
27 #include "hw/xen.h"
29 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
30 MemoryRegion *mr);
31 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
32 void qemu_ram_free(ram_addr_t addr);
33 void qemu_ram_free_from_ptr(ram_addr_t addr);
35 struct MemoryRegion;
36 struct MemoryRegionSection;
37 void cpu_register_physical_memory_log(struct MemoryRegionSection *section,
38 bool readonly);
40 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
41 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
43 int cpu_physical_memory_set_dirty_tracking(int enable);
45 #define VGA_DIRTY_FLAG 0x01
46 #define CODE_DIRTY_FLAG 0x02
47 #define MIGRATION_DIRTY_FLAG 0x08
49 static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
51 return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
54 /* read dirty bit (return 0 or 1) */
55 static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
57 return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
60 static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
61 ram_addr_t length,
62 int dirty_flags)
64 int ret = 0;
65 ram_addr_t addr, end;
67 end = TARGET_PAGE_ALIGN(start + length);
68 start &= TARGET_PAGE_MASK;
69 for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
70 ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
72 return ret;
75 static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
76 int dirty_flags)
78 if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
79 !cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
80 MIGRATION_DIRTY_FLAG)) {
81 ram_list.dirty_pages++;
83 return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
86 static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
88 cpu_physical_memory_set_dirty_flags(addr, 0xff);
91 static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
92 int dirty_flags)
94 int mask = ~dirty_flags;
96 if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
97 cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
98 MIGRATION_DIRTY_FLAG)) {
99 ram_list.dirty_pages--;
101 return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
104 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
105 ram_addr_t length,
106 int dirty_flags)
108 ram_addr_t addr, end;
110 end = TARGET_PAGE_ALIGN(start + length);
111 start &= TARGET_PAGE_MASK;
112 for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
113 cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
115 xen_modified_memory(addr, length);
118 static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
119 ram_addr_t length,
120 int dirty_flags)
122 ram_addr_t addr, end;
124 end = TARGET_PAGE_ALIGN(start + length);
125 start &= TARGET_PAGE_MASK;
126 for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
127 cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
131 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
132 int dirty_flags);
134 extern const IORangeOps memory_region_iorange_ops;
136 #endif
138 #endif