4 /* CPU interfaces that are target independent. */
6 #ifdef TARGET_PHYS_ADDR_BITS
15 #include "qemu-queue.h"
17 #if !defined(CONFIG_USER_ONLY)
/* address in the RAM (different from a physical address) */
/* Fix: the two typedef/macro sets below conflicted because the #if had no
 * #else/#endif — both branches were unconditionally visible, redefining
 * ram_addr_t/RAM_ADDR_MAX/RAM_ADDR_FMT. Restore the conditional structure. */
#if defined(CONFIG_XEN_BACKEND) && TARGET_PHYS_ADDR_BITS == 64
/* Xen grant-mapped RAM may exceed host pointer width: use a full 64 bits. */
typedef uint64_t ram_addr_t;
#  define RAM_ADDR_MAX UINT64_MAX
#  define RAM_ADDR_FMT "%" PRIx64
#else
/* Otherwise RAM addresses fit in a host pointer. */
typedef uintptr_t ram_addr_t;
#  define RAM_ADDR_MAX UINTPTR_MAX
#  define RAM_ADDR_FMT "%" PRIxPTR
#endif
38 typedef void CPUWriteMemoryFunc(void *opaque
, target_phys_addr_t addr
, uint32_t value
);
39 typedef uint32_t CPUReadMemoryFunc(void *opaque
, target_phys_addr_t addr
);
41 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
);
42 /* This should only be used for ram local to a device. */
43 void *qemu_get_ram_ptr(ram_addr_t addr
);
44 void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
);
45 /* Same but slower, to use for migration, where the order of
46 * RAMBlocks must not change. */
47 void *qemu_safe_ram_ptr(ram_addr_t addr
);
48 void qemu_put_ram_ptr(void *addr
);
49 /* This should not be used by devices. */
50 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
);
51 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
);
52 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
);
54 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
55 int len
, int is_write
);
56 static inline void cpu_physical_memory_read(target_phys_addr_t addr
,
59 cpu_physical_memory_rw(addr
, buf
, len
, 0);
61 static inline void cpu_physical_memory_write(target_phys_addr_t addr
,
62 const void *buf
, int len
)
64 cpu_physical_memory_rw(addr
, (void *)buf
, len
, 1);
66 void *cpu_physical_memory_map(target_phys_addr_t addr
,
67 target_phys_addr_t
*plen
,
69 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
70 int is_write
, target_phys_addr_t access_len
);
71 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
));
72 void cpu_unregister_map_client(void *cookie
);
74 #ifndef CONFIG_USER_ONLY
75 bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr
);
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 * Fix: the comment above was unterminated (no closing delimiter), which would
 * have swallowed the following declaration; restore the comment's closing. */
void qemu_flush_coalesced_mmio_buffer(void);
85 uint32_t ldub_phys(target_phys_addr_t addr
);
86 uint32_t lduw_le_phys(target_phys_addr_t addr
);
87 uint32_t lduw_be_phys(target_phys_addr_t addr
);
88 uint32_t ldl_le_phys(target_phys_addr_t addr
);
89 uint32_t ldl_be_phys(target_phys_addr_t addr
);
90 uint64_t ldq_le_phys(target_phys_addr_t addr
);
91 uint64_t ldq_be_phys(target_phys_addr_t addr
);
92 void stb_phys(target_phys_addr_t addr
, uint32_t val
);
93 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
);
94 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
);
95 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
);
96 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
);
97 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
);
98 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
);
101 uint32_t lduw_phys(target_phys_addr_t addr
);
102 uint32_t ldl_phys(target_phys_addr_t addr
);
103 uint64_t ldq_phys(target_phys_addr_t addr
);
104 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
);
105 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
);
106 void stw_phys(target_phys_addr_t addr
, uint32_t val
);
107 void stl_phys(target_phys_addr_t addr
, uint32_t val
);
108 void stq_phys(target_phys_addr_t addr
, uint64_t val
);
111 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
112 const uint8_t *buf
, int len
);
/* Built-in memory regions, defined in the memory core. */
extern struct MemoryRegion io_mem_ram;
extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_unassigned;
extern struct MemoryRegion io_mem_notdirty;
121 #endif /* !CPU_COMMON_H */