#ifndef CPU_COMMON_H
#define CPU_COMMON_H

/* CPU interfaces that are target independent.  */

#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif

/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
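
/*
 * Example: a caller that needs a stable view of the CPU list might
 * bracket its traversal with the list lock (a minimal sketch; the
 * traversal body is a placeholder):
 *
 *     cpu_list_lock();
 *     // ... walk the CPU list; entries cannot change under us here ...
 *     cpu_list_unlock();
 */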

void tcg_flush_softmmu_tlb(CPUState *cs);

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif

extern ram_addr_t ram_size;
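
/*
 * Example: ram_addr_t values should be printed with RAM_ADDR_FMT so the
 * conversion matches whichever typedef was selected above (sketch only):
 *
 *     printf("ram_size = " RAM_ADDR_FMT "\n", ram_size);
 */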

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
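
/*
 * Example: resolving a host pointer back to its RAMBlock and querying the
 * block's properties (a sketch; assumes `ptr` points into guest RAM):
 *
 *     ram_addr_t offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(ptr, false, &offset);
 *     if (rb) {
 *         printf("%s: page size %zu\n", qemu_ram_get_idstr(rb),
 *                qemu_ram_pagesize(rb));
 *     }
 */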

void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
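
/*
 * Example: round-tripping a 32-bit value through guest physical memory
 * with the inline wrappers above (sketch; `addr` is an assumed valid guest
 * physical address, and the copy is raw bytes with no byte swapping):
 *
 *     uint32_t val;
 *     cpu_physical_memory_read(addr, &val, sizeof(val));
 *     val |= 1;
 *     cpu_physical_memory_write(addr, &val, sizeof(val));
 */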
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               bool is_write, hwaddr access_len);
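
/*
 * Example: map/unmap gives direct access to a region of guest memory; the
 * mapped length may come back smaller than requested, and NULL is possible
 * when no mapping can be provided (sketch; `addr` and `len` assumed):
 *
 *     hwaddr plen = len;
 *     void *p = cpu_physical_memory_map(addr, &plen, true);
 *     if (p) {
 *         memset(p, 0, plen);
 *         cpu_physical_memory_unmap(p, plen, true, plen);
 *     }
 */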
void cpu_register_map_client(QEMUBH *bh);
void cpu_unregister_map_client(QEMUBH *bh);

bool cpu_physical_memory_is_io(hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);

void cpu_flush_icache_range(hwaddr start, hwaddr len);

typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
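
/*
 * Example: a RAMBlockIterFunc callback; returning non-zero from the
 * callback stops the iteration early (sketch only):
 *
 *     static int dump_block(RAMBlock *rb, void *opaque)
 *     {
 *         printf("%s\n", qemu_ram_get_idstr(rb));
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */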
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
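
/*
 * Example: discarding the first host page of a block to drop its backing
 * pages; a negative return indicates failure (sketch; `rb` as obtained in
 * the lookup example above):
 *
 *     if (ram_block_discard_range(rb, 0, qemu_ram_pagesize(rb)) < 0) {
 *         // could not discard; fall back to clearing by hand
 *     }
 */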

#endif

#endif /* CPU_COMMON_H */