#ifndef CPU_COMMON_H
#define CPU_COMMON_H

/* CPU interfaces that are target independent. */
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
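/*
 * Usage sketch (illustrative only, not part of the API): respect the
 * ordering documented above by taking the CPU list lock before any page
 * or mmap lock, never the other way around.
 *
 *     cpu_list_lock();
 *     // ... walk or modify the global CPU list ...
 *     cpu_list_unlock();
 */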
void tcg_flush_softmmu_tlb(CPUState *cs);

void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);
#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif
/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
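/*
 * Usage sketch (illustrative only, rb being some RAMBlock *):
 * RAM_ADDR_FMT is the printf-style conversion matching ram_addr_t,
 * whichever branch above was selected.
 *
 *     ram_addr_t offset = qemu_ram_get_offset(rb);
 *     fprintf(stderr, "block offset: " RAM_ADDR_FMT "\n", offset);
 */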
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
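/*
 * Usage sketch (illustrative only, gpa being some guest physical
 * address): copy a page out of guest memory, modify it, write it back.
 *
 *     uint8_t data[4096];
 *     cpu_physical_memory_read(gpa, data, sizeof(data));
 *     // ... inspect or patch data ...
 *     cpu_physical_memory_write(gpa, data, sizeof(data));
 */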
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               bool is_write, hwaddr access_len);
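/*
 * Usage sketch (illustrative only, gpa/len being a guest physical range):
 * map the range for direct access, then unmap it; the mapped length may
 * come back smaller than requested.
 *
 *     hwaddr plen = len;
 *     void *p = cpu_physical_memory_map(gpa, &plen, true);
 *     if (p) {
 *         memset(p, 0, plen);
 *         cpu_physical_memory_unmap(p, plen, true, plen);
 *     }
 */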
void cpu_register_map_client(QEMUBH *bh);
void cpu_unregister_map_client(QEMUBH *bh);
bool cpu_physical_memory_is_io(hwaddr phys_addr);
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);
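/*
 * Usage sketch (illustrative only): a device whose MMIO region has been
 * marked coalesced (e.g. via the memory API's memory_region_set_coalescing())
 * should flush the buffer before handling an access whose result depends
 * on earlier, possibly still-buffered writes:
 *
 *     qemu_flush_coalesced_mmio_buffer();
 *     // ... now handle the access that observes the earlier writes ...
 */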
void cpu_flush_icache_range(hwaddr start, hwaddr len);
typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
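/*
 * Usage sketch (illustrative only): walk every RAMBlock with
 * qemu_ram_foreach_block(); by convention a non-zero return from the
 * callback stops the walk and is propagated to the caller.
 *
 *     static int dump_block(RAMBlock *rb, void *opaque)
 *     {
 *         fprintf(stderr, "%s: " RAM_ADDR_FMT " bytes used\n",
 *                 qemu_ram_get_idstr(rb), qemu_ram_get_used_length(rb));
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */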
#endif

extern int singlestep;
#endif /* CPU_COMMON_H */