/* CPU interfaces that are target independent. */
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif

/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
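/*
 * A minimal ordering sketch (not part of the original header). It assumes the
 * user-mode mmap_lock()/mmap_unlock() helpers from "exec/exec-all.h" and only
 * illustrates the documented nesting: the CPU list lock is taken before, and
 * released after, the mmap lock.
 *
 *     cpu_list_lock();
 *     mmap_lock();
 *     // ... walk the CPU list and touch guest mappings ...
 *     mmap_unlock();
 *     cpu_list_unlock();
 */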
void tcg_flush_softmmu_tlb(CPUState *cs);

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif
/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
extern ram_addr_t ram_size;
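/*
 * A short usage sketch (not part of the original header): ram_addr_t changes
 * width depending on CONFIG_XEN_BACKEND, so code should print it through
 * RAM_ADDR_FMT rather than hard-coding a conversion specifier. "rb" below is
 * an assumed RAMBlock pointer.
 *
 *     ram_addr_t offset = qemu_ram_get_offset(rb);
 *     printf("block starts at 0x" RAM_ADDR_FMT "\n", offset);
 */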
typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            hwaddr len, int is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
}
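/*
 * A hedged usage sketch (not part of the original header) of the convenience
 * wrappers above: read a few bytes of guest physical memory, modify them, and
 * write them back. The address 0x1000 is an arbitrary example, not a real
 * guest mapping.
 *
 *     uint8_t buf[4];
 *     cpu_physical_memory_read(0x1000, buf, sizeof(buf));
 *     buf[0] ^= 0xff;
 *     cpu_physical_memory_write(0x1000, buf, sizeof(buf));
 */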
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len);
void cpu_register_map_client(QEMUBH *bh);
void cpu_unregister_map_client(QEMUBH *bh);
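/*
 * A hedged sketch (not part of the original header) of the map/unmap pair:
 * map a guest physical range for writing, operate on the returned host
 * pointer, then unmap and report how much was actually accessed. The address
 * and length are arbitrary examples; the call can fail or return a shorter
 * region than requested, so both must be checked.
 *
 *     hwaddr plen = 4096;
 *     void *p = cpu_physical_memory_map(0x2000, &plen, 1);
 *     if (p) {
 *         memset(p, 0, plen);
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     }
 */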
bool cpu_physical_memory_is_io(hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);
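/*
 * A hedged device-side sketch (not part of the original header): a read
 * handler whose result depends on earlier, possibly still buffered, coalesced
 * writes flushes the buffer first so those writes are applied in order. The
 * handler name and shape are hypothetical.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         qemu_flush_coalesced_mmio_buffer();
 *         return 0; // compute the result from now-consistent device state
 *     }
 */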
void cpu_flush_icache_range(hwaddr start, hwaddr len);

extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_notdirty;

typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
#endif

#endif /* CPU_COMMON_H */