4 /* CPU interfaces that are target independent. */
13 #include "qemu-queue.h"
15 #if !defined(CONFIG_USER_ONLY)
/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
/* Xen may map guest RAM above the host's pointer range, so a fixed
 * 64-bit type is required rather than uintptr_t. */
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
36 typedef void CPUWriteMemoryFunc(void *opaque
, target_phys_addr_t addr
, uint32_t value
);
37 typedef uint32_t CPUReadMemoryFunc(void *opaque
, target_phys_addr_t addr
);
39 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
);
40 /* This should only be used for ram local to a device. */
41 void *qemu_get_ram_ptr(ram_addr_t addr
);
42 void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
);
43 /* Same but slower, to use for migration, where the order of
44 * RAMBlocks must not change. */
45 void *qemu_safe_ram_ptr(ram_addr_t addr
);
46 void qemu_put_ram_ptr(void *addr
);
47 /* This should not be used by devices. */
48 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
);
49 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
);
50 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
);
52 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
53 int len
, int is_write
);
54 static inline void cpu_physical_memory_read(target_phys_addr_t addr
,
57 cpu_physical_memory_rw(addr
, buf
, len
, 0);
59 static inline void cpu_physical_memory_write(target_phys_addr_t addr
,
60 const void *buf
, int len
)
62 cpu_physical_memory_rw(addr
, (void *)buf
, len
, 1);
64 void *cpu_physical_memory_map(target_phys_addr_t addr
,
65 target_phys_addr_t
*plen
,
67 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
68 int is_write
, target_phys_addr_t access_len
);
69 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
));
70 void cpu_unregister_map_client(void *cookie
);
72 bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr
);
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This
 * allows batching which can make a major impact on performance when using
 * virtualisation.
 */
void qemu_flush_coalesced_mmio_buffer(void);
81 uint32_t ldub_phys(target_phys_addr_t addr
);
82 uint32_t lduw_le_phys(target_phys_addr_t addr
);
83 uint32_t lduw_be_phys(target_phys_addr_t addr
);
84 uint32_t ldl_le_phys(target_phys_addr_t addr
);
85 uint32_t ldl_be_phys(target_phys_addr_t addr
);
86 uint64_t ldq_le_phys(target_phys_addr_t addr
);
87 uint64_t ldq_be_phys(target_phys_addr_t addr
);
88 void stb_phys(target_phys_addr_t addr
, uint32_t val
);
89 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
);
90 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
);
91 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
);
92 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
);
93 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
);
94 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
);
97 uint32_t lduw_phys(target_phys_addr_t addr
);
98 uint32_t ldl_phys(target_phys_addr_t addr
);
99 uint64_t ldq_phys(target_phys_addr_t addr
);
100 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
);
101 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
);
102 void stw_phys(target_phys_addr_t addr
, uint32_t val
);
103 void stl_phys(target_phys_addr_t addr
, uint32_t val
);
104 void stq_phys(target_phys_addr_t addr
, uint64_t val
);
107 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
108 const uint8_t *buf
, int len
);
/* Built-in memory regions defined in exec.c — assumed; TODO confirm
 * the defining translation unit. */
extern struct MemoryRegion io_mem_ram;
extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_unassigned;
extern struct MemoryRegion io_mem_notdirty;
117 #endif /* !CPU_COMMON_H */