#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1

/* CPU interfaces that are target independent.  */

#ifdef TARGET_PHYS_ADDR_BITS
#include "targphys.h"
#endif
#include "qemu-queue.h"

#if !defined(CONFIG_USER_ONLY)

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND) && TARGET_PHYS_ADDR_BITS == 64
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
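
/*
 * Usage sketch (illustrative only; the helper name is hypothetical and
 * assumes <stdio.h>): RAM_ADDR_FMT lets callers print a ram_addr_t without
 * knowing which of the two typedefs above is in effect.
 *
 *     static void dump_ram_addr(ram_addr_t addr)
 *     {
 *         fprintf(stderr, "ram_addr = 0x" RAM_ADDR_FMT "\n", addr);
 *     }
 */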

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr,
                                uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
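
/*
 * Sketch of callbacks matching the two typedefs above (the device struct and
 * function names are hypothetical; real devices hand such callbacks to the
 * memory registration API rather than calling them directly):
 *
 *     typedef struct MyDev { uint32_t reg; } MyDev;
 *
 *     static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
 *     {
 *         MyDev *d = opaque;
 *         return d->reg;                        // a CPUReadMemoryFunc
 *     }
 *
 *     static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                             uint32_t value)
 *     {
 *         MyDev *d = opaque;
 *         d->reg = value;                       // a CPUWriteMemoryFunc
 *     }
 */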

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device.  */
void *qemu_get_ram_ptr(ram_addr_t addr);
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices.  */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
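
/*
 * Usage sketch (illustrative; dev_ram_addr is a hypothetical ram_addr_t that
 * the device obtained when its RAM block was allocated): translating between
 * a ram_addr_t and a host pointer, and back again.
 *
 *     void *host = qemu_get_ram_ptr(dev_ram_addr);  // device-local RAM only
 *     memset(host, 0, 4096);                        // assumes <string.h>
 *     qemu_put_ram_ptr(host);
 *
 *     ram_addr_t back;
 *     if (qemu_ram_addr_from_host(host, &back) == 0) {
 *         // assumed return convention: 0 when the pointer was found
 *     }
 */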

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            void *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const void *buf, int len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
}
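
/*
 * Usage sketch (illustrative; the guest address desc_gpa and the descriptor
 * layout are hypothetical): copying a small structure out of and back into
 * guest physical memory with the inline helpers above.
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_gpa, desc, sizeof(desc));
 *     desc[0] |= 0x01;                              // e.g. set an "owned" flag
 *     cpu_physical_memory_write(desc_gpa, desc, sizeof(desc));
 */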

void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
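
/*
 * Usage sketch (illustrative; gpa and len are hypothetical): zero-copy access
 * to a guest buffer.  The mapping may come back shorter than requested or be
 * NULL when bounce-buffer resources are exhausted; in the latter case
 * cpu_register_map_client() can be used to get a callback when mapping is
 * possible again.
 *
 *     target_phys_addr_t plen = len;
 *     void *p = cpu_physical_memory_map(gpa, &plen, 1);   // 1 = is_write
 *     if (p) {
 *         memset(p, 0, plen);                // touch only the mapped length
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     }
 */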

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);
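
/*
 * Sketch (illustrative; MyDev and the handler name are hypothetical, and
 * whether a flush is needed depends on how the device's regions were
 * registered): code about to observe state that batched writes may affect
 * drains the coalesced buffer first.
 *
 *     static uint32_t mydev_status_read(void *opaque, target_phys_addr_t addr)
 *     {
 *         MyDev *d = opaque;
 *         qemu_flush_coalesced_mmio_buffer();   // apply any pending writes
 *         return d->reg;
 *     }
 */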

uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_le_phys(target_phys_addr_t addr);
uint32_t lduw_be_phys(target_phys_addr_t addr);
uint32_t ldl_le_phys(target_phys_addr_t addr);
uint32_t ldl_be_phys(target_phys_addr_t addr);
uint64_t ldq_le_phys(target_phys_addr_t addr);
uint64_t ldq_be_phys(target_phys_addr_t addr);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_le_phys(target_phys_addr_t addr, uint32_t val);
void stw_be_phys(target_phys_addr_t addr, uint32_t val);
void stl_le_phys(target_phys_addr_t addr, uint32_t val);
void stl_be_phys(target_phys_addr_t addr, uint32_t val);
void stq_le_phys(target_phys_addr_t addr, uint64_t val);
void stq_be_phys(target_phys_addr_t addr, uint64_t val);
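
/*
 * Usage sketch (illustrative; ring_gpa and the field layout are hypothetical):
 * a device whose in-memory structures are defined as little-endian uses the
 * _le_ accessors so the same code behaves identically on big-endian targets.
 *
 *     uint32_t head = ldl_le_phys(ring_gpa + 0);   // 32-bit LE field
 *     uint64_t base = ldq_le_phys(ring_gpa + 8);   // 64-bit LE field
 *     stw_le_phys(ring_gpa + 4, 0x0001);           // 16-bit LE store
 */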

uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);
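
/*
 * Sketch (illustrative; param_gpa and boot_arg are hypothetical): the
 * unsuffixed accessors use the target's own byte order, which suits
 * firmware-style setup code writing values the guest reads natively.  The
 * _notdirty stores additionally bypass the usual dirty-bit bookkeeping and
 * are meant for internal uses such as page-table updates, not for ordinary
 * device emulation.
 *
 *     stl_phys(param_gpa, boot_arg);      // guest sees a native-endian u32
 *     uint32_t v = ldl_phys(param_gpa);   // read back in target byte order
 */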

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
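
/*
 * Usage sketch (illustrative; rom_base_gpa, blob and blob_len are
 * hypothetical): unlike the ordinary write helpers, this call also stores
 * into memory the guest sees as read-only, which is what ROM/firmware
 * loading needs.
 *
 *     cpu_physical_memory_write_rom(rom_base_gpa, blob, blob_len);
 */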

extern struct MemoryRegion io_mem_ram;
extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_unassigned;
extern struct MemoryRegion io_mem_notdirty;

#endif /* !CONFIG_USER_ONLY */

#endif /* !CPU_COMMON_H */