/*
 * Flush the host cpu caches.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
8 #include "qemu/osdep.h"
9 #include "qemu/cacheflush.h"
10 #include "qemu/bitops.h"
13 #if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
15 /* Caches are coherent and do not require flushing; symbol inline. */
17 #elif defined(__aarch64__)
/* Apple does not expose CTR_EL0, so we must use system interfaces. */
extern void sys_icache_invalidate(void *start, size_t len);
extern void sys_dcache_flush(void *start, size_t len);

/*
 * Make writes performed through the RW alias visible to instruction
 * fetch through the RX alias, using Darwin's libSystem helpers.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    /* Push the new code out of the data cache first... */
    sys_dcache_flush((void *)rw, len);
    /* ...then discard any stale instructions cached for the RX mapping. */
    sys_icache_invalidate((void *)rx, len);
}
31 * TODO: unify this with cacheinfo.c.
32 * We want to save the whole contents of CTR_EL0, so that we
33 * have more than the linesize, but also IDC and DIC.
35 static unsigned int save_ctr_el0
;
36 static void __attribute__((constructor
)) init_ctr_el0(void)
38 asm volatile("mrs\t%0, ctr_el0" : "=r"(save_ctr_el0
));
42 * This is a copy of gcc's __aarch64_sync_cache_range, modified
43 * to fit this three-operand interface.
45 void flush_idcache_range(uintptr_t rx
, uintptr_t rw
, size_t len
)
47 const unsigned CTR_IDC
= 1u << 28;
48 const unsigned CTR_DIC
= 1u << 29;
49 const unsigned int ctr_el0
= save_ctr_el0
;
50 const uintptr_t icache_lsize
= 4 << extract32(ctr_el0
, 0, 4);
51 const uintptr_t dcache_lsize
= 4 << extract32(ctr_el0
, 16, 4);
55 * If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification
56 * is not required for instruction to data coherence.
58 if (!(ctr_el0
& CTR_IDC
)) {
60 * Loop over the address range, clearing one cache line at once.
61 * Data cache must be flushed to unification first to make sure
62 * the instruction cache fetches the updated data.
64 for (p
= rw
& -dcache_lsize
; p
< rw
+ len
; p
+= dcache_lsize
) {
65 asm volatile("dc\tcvau, %0" : : "r" (p
) : "memory");
67 asm volatile("dsb\tish" : : : "memory");
71 * If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point
72 * of Unification is not required for instruction to data coherence.
74 if (!(ctr_el0
& CTR_DIC
)) {
75 for (p
= rx
& -icache_lsize
; p
< rx
+ len
; p
+= icache_lsize
) {
76 asm volatile("ic\tivau, %0" : : "r"(p
) : "memory");
78 asm volatile ("dsb\tish" : : : "memory");
81 asm volatile("isb" : : : "memory");
83 #endif /* CONFIG_DARWIN */
85 #elif defined(__mips__)
88 #include <machine/sysarch.h>
90 #include <sys/cachectl.h>
93 void flush_idcache_range(uintptr_t rx
, uintptr_t rw
, size_t len
)
96 cacheflush((void *)rw
, len
, DCACHE
);
98 cacheflush((void *)rx
, len
, ICACHE
);
101 #elif defined(__powerpc__)
103 void flush_idcache_range(uintptr_t rx
, uintptr_t rw
, size_t len
)
106 size_t dsize
= qemu_dcache_linesize
;
107 size_t isize
= qemu_icache_linesize
;
109 b
= rw
& ~(dsize
- 1);
110 e
= (rw
+ len
+ dsize
- 1) & ~(dsize
- 1);
111 for (p
= b
; p
< e
; p
+= dsize
) {
112 asm volatile ("dcbst 0,%0" : : "r"(p
) : "memory");
114 asm volatile ("sync" : : : "memory");
116 b
= rx
& ~(isize
- 1);
117 e
= (rx
+ len
+ isize
- 1) & ~(isize
- 1);
118 for (p
= b
; p
< e
; p
+= isize
) {
119 asm volatile ("icbi 0,%0" : : "r"(p
) : "memory");
121 asm volatile ("sync" : : : "memory");
122 asm volatile ("isync" : : : "memory");
125 #elif defined(__sparc__)
/* Flush the icache with the sparc "flush" instruction, 8 bytes per step. */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    /* No additional data flush to the RW virtual address required. */
    uintptr_t p, end = (rx + len + 7) & -8;
    for (p = rx & -8; p < end; p += 8) {
        __asm__ __volatile__("flush\t%0" : : "r" (p));
    }
}
/* Generic fallback: let the compiler's builtin do whatever the host needs. */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    if (rw != rx) {
        /* Split mapping: flush the data side via the RW alias as well. */
        __builtin___clear_cache((char *)rw, (char *)rw + len);
    }
    __builtin___clear_cache((char *)rx, (char *)rx + len);
}