4 /* CPU interfaces that are target independent. */
6 #ifdef TARGET_PHYS_ADDR_BITS
15 #include "qemu-queue.h"
17 #if !defined(CONFIG_USER_ONLY)
/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND) && TARGET_PHYS_ADDR_BITS == 64
/* Xen backends can map guest RAM above 4 GiB even on 32-bit hosts,
 * so the RAM address space must be a full 64 bits wide. */
typedef uint64_t ram_addr_t;
#  define RAM_ADDR_MAX UINT64_MAX
#  define RAM_ADDR_FMT "%" PRIx64
#else
/* Otherwise a host 'unsigned long' is sufficient to address all RAM. */
typedef unsigned long ram_addr_t;
#  define RAM_ADDR_MAX ULONG_MAX
#  define RAM_ADDR_FMT "%lx"
#endif
38 typedef void CPUWriteMemoryFunc(void *opaque
, target_phys_addr_t addr
, uint32_t value
);
39 typedef uint32_t CPUReadMemoryFunc(void *opaque
, target_phys_addr_t addr
);
41 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
);
42 /* This should only be used for ram local to a device. */
43 void *qemu_get_ram_ptr(ram_addr_t addr
);
44 void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
);
45 /* Same but slower, to use for migration, where the order of
46 * RAMBlocks must not change. */
47 void *qemu_safe_ram_ptr(ram_addr_t addr
);
48 void qemu_put_ram_ptr(void *addr
);
49 /* This should not be used by devices. */
50 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
);
51 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
);
53 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
54 int len
, int is_write
);
55 static inline void cpu_physical_memory_read(target_phys_addr_t addr
,
58 cpu_physical_memory_rw(addr
, buf
, len
, 0);
60 static inline void cpu_physical_memory_write(target_phys_addr_t addr
,
61 const void *buf
, int len
)
63 cpu_physical_memory_rw(addr
, (void *)buf
, len
, 1);
65 void *cpu_physical_memory_map(target_phys_addr_t addr
,
66 target_phys_addr_t
*plen
,
68 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
69 int is_write
, target_phys_addr_t access_len
);
/* Register a callback to be invoked when map resources become available
 * (e.g. after a failed cpu_physical_memory_map()).  Returns an opaque
 * cookie to pass to cpu_unregister_map_client(). */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.  (NOTE: comment tail reconstructed -- verify against
 * upstream.) */
void qemu_flush_coalesced_mmio_buffer(void);
80 uint32_t ldub_phys(target_phys_addr_t addr
);
81 uint32_t lduw_le_phys(target_phys_addr_t addr
);
82 uint32_t lduw_be_phys(target_phys_addr_t addr
);
83 uint32_t ldl_le_phys(target_phys_addr_t addr
);
84 uint32_t ldl_be_phys(target_phys_addr_t addr
);
85 uint64_t ldq_le_phys(target_phys_addr_t addr
);
86 uint64_t ldq_be_phys(target_phys_addr_t addr
);
87 void stb_phys(target_phys_addr_t addr
, uint32_t val
);
88 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
);
89 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
);
90 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
);
91 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
);
92 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
);
93 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
);
96 uint32_t lduw_phys(target_phys_addr_t addr
);
97 uint32_t ldl_phys(target_phys_addr_t addr
);
98 uint64_t ldq_phys(target_phys_addr_t addr
);
99 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
);
100 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
);
101 void stw_phys(target_phys_addr_t addr
, uint32_t val
);
102 void stl_phys(target_phys_addr_t addr
, uint32_t val
);
103 void stq_phys(target_phys_addr_t addr
, uint64_t val
);
106 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
107 const uint8_t *buf
, int len
);
/* Legacy I/O memory index encoding: handler slots are shifted left by
 * IO_MEM_SHIFT, leaving the low bits free for the flags below. */
#define IO_MEM_SHIFT       3

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)
#define IO_MEM_SUBPAGE_RAM (4 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD        (1)
#define IO_MEM_SUBPAGE     (2)
123 #endif /* !CPU_COMMON_H */