msi: Add msi_get_message()
[qemu/ar7.git] / memory-internal.h
blob 1c34b9751ccfe5ced026fdcbe466b29abab54783
/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen.h"
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
typedef struct AddressSpaceDispatch AddressSpaceDispatch;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
};
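/*
 * Illustrative sketch (not part of the original header): how a lookup
 * conceptually walks phys_map.  The real walk lives in exec.c; the node and
 * section arrays named below (phys_map_nodes, phys_sections) and the level
 * constants (P_L2_LEVELS, L2_BITS, L2_SIZE) are assumptions about that
 * implementation, shown here only to clarify the is_leaf/ptr encoding above.
 */
#if 0
/* Take a page index (addr >> TARGET_PAGE_BITS) and return the index of the
 * matching MemoryRegionSection in phys_sections. */
static uint16_t phys_page_lookup_sketch(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    int i;

    /* Interior nodes consume L2_BITS of the page index per level, top down,
     * until a leaf entry is reached. */
    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        PhysPageEntry *node = phys_map_nodes[lp.ptr];
        lp = node[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    /* On a leaf, ptr indexes phys_sections. */
    return lp.ptr;
}
#endif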
void address_space_init_dispatch(AddressSpace *as);
void address_space_destroy_dispatch(AddressSpace *as);

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

struct MemoryRegion;
struct MemoryRegionSection;

void qemu_register_coalesced_mmio(hwaddr addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(hwaddr addr, ram_addr_t size);

int cpu_physical_memory_set_dirty_tracking(int enable);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
}
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    int ret = 0;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
    }
    return ret;
}
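/*
 * Illustrative usage sketch (not part of the original header): how a caller
 * inside exec.c/memory.c might test a RAM range against one dirty flag.  The
 * function name and the migration scenario are hypothetical.
 */
#if 0
/* Return true if any page in [start, start + length) still carries
 * MIGRATION_DIRTY_FLAG, i.e. would need to be resent during RAM migration. */
static bool ram_range_needs_migration(ram_addr_t start, ram_addr_t length)
{
    return cpu_physical_memory_get_dirty(start, length,
                                         MIGRATION_DIRTY_FLAG) != 0;
}
#endif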
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    cpu_physical_memory_set_dirty_flags(addr, 0xff);
}

static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
                                                        int dirty_flags)
{
    int mask = ~dirty_flags;

    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
    }
    xen_modified_memory(addr, length);
}
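/*
 * Illustrative usage sketch (not part of the original header): marking a
 * guest RAM range dirty after a device write.  The function name and the DMA
 * scenario are hypothetical.
 */
#if 0
/* After a device DMA write into guest RAM, flag every touched page for all
 * consumers: display refresh, TB invalidation and migration. */
static void dma_write_mark_dirty(ram_addr_t start, ram_addr_t length)
{
    cpu_physical_memory_set_dirty_range(start, length,
                                        VGA_DIRTY_FLAG | CODE_DIRTY_FLAG |
                                        MIGRATION_DIRTY_FLAG);
}
#endif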
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
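/*
 * Illustrative usage sketch (not part of the original header): the typical
 * consume-and-clear pattern for a single dirty flag.  The function name and
 * the display-refresh scenario are hypothetical.
 */
#if 0
/* Test a flag over a range, act on it, then clear just that flag so the next
 * guest write to the range is detected again. */
static void refresh_if_dirty(ram_addr_t start, ram_addr_t length)
{
    if (cpu_physical_memory_get_dirty(start, length, VGA_DIRTY_FLAG)) {
        /* ... redraw the affected region ... */
        cpu_physical_memory_reset_dirty(start, start + length,
                                        VGA_DIRTY_FLAG);
    }
}
#endif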
extern const IORangeOps memory_region_iorange_ops;

#endif

#endif