[qemu/ar7.git] / exec-obsolete.h
/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY. Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef EXEC_OBSOLETE_H
#define EXEC_OBSOLETE_H

#ifndef WANT_EXEC_OBSOLETE
#error Do not include exec-obsolete.h
#endif

#ifndef CONFIG_USER_ONLY

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

struct MemoryRegion;
struct MemoryRegionSection;
void cpu_register_physical_memory_log(struct MemoryRegionSection *section,
                                      bool readonly);

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

int cpu_physical_memory_set_dirty_tracking(int enable);

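/*
 * Each RAM page has one dirty byte in ram_list.phys_dirty, indexed by
 * addr >> TARGET_PAGE_BITS; the flags below select the individual
 * dirty-tracking clients within that byte.
 */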
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

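/* return the raw dirty byte for the page containing addr */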
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

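/* return which of dirty_flags are set on any page in [start, start + length) */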
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    int ret = 0;
    uint8_t *p;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        ret |= *p++ & dirty_flags;
    }
    return ret;
}

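/* mark the page containing addr dirty for all clients */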
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

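/* OR dirty_flags into the dirty byte of the page containing addr and
 * return the resulting byte */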
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

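/* set dirty_flags on every page in [start, start + length) */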
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    uint8_t *p;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        *p++ |= dirty_flags;
    }
}

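/* clear dirty_flags on every page in [start, start + length) */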
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                         ram_addr_t length,
                                                         int dirty_flags)
{
    int mask;
    uint8_t *p;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        *p++ &= mask;
    }
}

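/* defined in exec.c: clear dirty_flags for the pages in [start, end) */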
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

extern const IORangeOps memory_region_iorange_ops;

#endif

#endif