/*
 * Info about, and flushing the host cpu caches.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
8 #include "qemu/osdep.h"
9 #include "qemu/cacheflush.h"
10 #include "qemu/cacheinfo.h"
11 #include "qemu/bitops.h"
12 #include "qemu/host-utils.h"
13 #include "qemu/atomic.h"
/*
 * Host L1 cache line sizes (bytes) and their log2, published for the rest
 * of QEMU.  Filled in once at startup by init_cache_info() below; the
 * asserts there guarantee the values are non-zero powers of two.
 */
int qemu_icache_linesize = 0;
int qemu_icache_linesize_log;
int qemu_dcache_linesize = 0;
int qemu_dcache_linesize_log;
/*
 * Operating system specific cache detection mechanisms.
 */
27 static void sys_cache_info(int *isize
, int *dsize
)
29 SYSTEM_LOGICAL_PROCESSOR_INFORMATION
*buf
;
35 * Check for the required buffer size first. Note that if the zero
36 * size we use for the probe results in success, then there is no
37 * data available; fail in that case.
39 success
= GetLogicalProcessorInformation(0, &size
);
40 if (success
|| GetLastError() != ERROR_INSUFFICIENT_BUFFER
) {
44 n
= size
/ sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION
);
45 size
= n
* sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION
);
46 buf
= g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION
, n
);
47 if (!GetLogicalProcessorInformation(buf
, &size
)) {
51 for (i
= 0; i
< n
; i
++) {
52 if (buf
[i
].Relationship
== RelationCache
53 && buf
[i
].Cache
.Level
== 1) {
54 switch (buf
[i
].Cache
.Type
) {
56 *isize
= *dsize
= buf
[i
].Cache
.LineSize
;
58 case CacheInstruction
:
59 *isize
= buf
[i
].Cache
.LineSize
;
62 *dsize
= buf
[i
].Cache
.LineSize
;
73 #elif defined(CONFIG_DARWIN)
74 # include <sys/sysctl.h>
75 static void sys_cache_info(int *isize
, int *dsize
)
77 /* There's only a single sysctl for both I/D cache line sizes. */
79 size_t len
= sizeof(size
);
80 if (!sysctlbyname("hw.cachelinesize", &size
, &len
, NULL
, 0)) {
81 *isize
= *dsize
= size
;
84 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
85 # include <sys/sysctl.h>
86 static void sys_cache_info(int *isize
, int *dsize
)
88 /* There's only a single sysctl for both I/D cache line sizes. */
90 size_t len
= sizeof(size
);
91 if (!sysctlbyname("machdep.cacheline_size", &size
, &len
, NULL
, 0)) {
92 *isize
= *dsize
= size
;
/*
 * Generic POSIX probe via sysconf().  Each value is stored only when the
 * query succeeds with a positive result; otherwise the output is left
 * untouched for the later probes to fill in.
 */
static void sys_cache_info(int *isize, int *dsize)
{
# ifdef _SC_LEVEL1_ICACHE_LINESIZE
    int tmp_isize = (int) sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
    if (tmp_isize > 0) {
        *isize = tmp_isize;
    }
# endif
# ifdef _SC_LEVEL1_DCACHE_LINESIZE
    int tmp_dsize = (int) sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
    if (tmp_dsize > 0) {
        *dsize = tmp_dsize;
    }
# endif
}
113 #endif /* sys_cache_info */
/*
 * Architecture (+ OS) specific cache detection mechanisms.
 */
120 #if defined(__powerpc__)
/* Set from AT_HWCAP (PPC_FEATURE_ICACHE_SNOOP); enables the simplified
   single-icbi flush path in flush_idcache_range below. */
static bool have_coherent_icache;
124 #if defined(__aarch64__) && !defined(CONFIG_DARWIN)
125 /* Apple does not expose CTR_EL0, so we must use system interfaces. */
/* CTR_EL0 as read in arch_cache_info; reused by flush_idcache_range to
   test the IDC/DIC coherence bits without re-reading the register. */
static uint64_t save_ctr_el0;
127 static void arch_cache_info(int *isize
, int *dsize
)
132 * The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1,
133 * but (at least under Linux) these are marked protected by the
134 * kernel. However, CTR_EL0 contains the minimum linesize in the
135 * entire hierarchy, and is used by userspace cache flushing.
137 * We will also use this value in flush_idcache_range.
139 asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr
));
142 if (*isize
== 0 || *dsize
== 0) {
144 *isize
= 4 << (ctr
& 0xf);
147 *dsize
= 4 << ((ctr
>> 16) & 0xf);
152 #elif defined(_ARCH_PPC) && defined(__linux__)
155 static void arch_cache_info(int *isize
, int *dsize
)
158 *isize
= qemu_getauxval(AT_ICACHEBSIZE
);
161 *dsize
= qemu_getauxval(AT_DCACHEBSIZE
);
163 have_coherent_icache
= qemu_getauxval(AT_HWCAP
) & PPC_FEATURE_ICACHE_SNOOP
;
/* No architecture-specific probe; leave the OS/fallback values alone. */
static void arch_cache_info(int *isize, int *dsize)
{
}
168 #endif /* arch_cache_info */
/*
 * ... and if all else fails ...
 */
/*
 * Last-resort defaults: if only one of the two line sizes was detected,
 * mirror it into the other; if neither was detected, use a conservative
 * per-architecture constant.
 */
static void fallback_cache_info(int *isize, int *dsize)
{
    /* If we can only find one of the two, assume they're the same. */
    if (*isize) {
        if (*dsize == 0) {
            *dsize = *isize;
        }
    } else if (*dsize) {
        *isize = *dsize;
    } else {
#if defined(_ARCH_PPC)
        /*
         * For PPC, we're going to use the cache sizes computed for
         * flush_idcache_range.  Which means that we must use the
         * architecture minimum.
         */
        *isize = *dsize = 16;
#else
        /* Otherwise, 64 bytes is not uncommon. */
        *isize = *dsize = 64;
#endif
    }
}
200 static void __attribute__((constructor
)) init_cache_info(void)
202 int isize
= 0, dsize
= 0;
204 sys_cache_info(&isize
, &dsize
);
205 arch_cache_info(&isize
, &dsize
);
206 fallback_cache_info(&isize
, &dsize
);
208 assert((isize
& (isize
- 1)) == 0);
209 assert((dsize
& (dsize
- 1)) == 0);
211 qemu_icache_linesize
= isize
;
212 qemu_icache_linesize_log
= ctz32(isize
);
213 qemu_dcache_linesize
= dsize
;
214 qemu_dcache_linesize_log
= ctz32(dsize
);
/*
 * Architecture (+ OS) specific cache flushing mechanisms.
 */
224 #if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
226 /* Caches are coherent and do not require flushing; symbol inline. */
228 #elif defined(__aarch64__)
231 /* Apple does not expose CTR_EL0, so we must use system interfaces. */
/* System-provided cache maintenance entry points (see the comment above:
   CTR_EL0 is not exposed here, so hand-rolled flushing is not possible). */
extern void sys_icache_invalidate(void *start, size_t len);
extern void sys_dcache_flush(void *start, size_t len);
234 void flush_idcache_range(uintptr_t rx
, uintptr_t rw
, size_t len
)
236 sys_dcache_flush((void *)rw
, len
);
237 sys_icache_invalidate((void *)rx
, len
);
/*
 * This is a copy of gcc's __aarch64_sync_cache_range, modified
 * to fit this three-operand interface.
 */
245 void flush_idcache_range(uintptr_t rx
, uintptr_t rw
, size_t len
)
247 const unsigned CTR_IDC
= 1u << 28;
248 const unsigned CTR_DIC
= 1u << 29;
249 const uint64_t ctr_el0
= save_ctr_el0
;
250 const uintptr_t icache_lsize
= qemu_icache_linesize
;
251 const uintptr_t dcache_lsize
= qemu_dcache_linesize
;
255 * If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification
256 * is not required for instruction to data coherence.
258 if (!(ctr_el0
& CTR_IDC
)) {
260 * Loop over the address range, clearing one cache line at once.
261 * Data cache must be flushed to unification first to make sure
262 * the instruction cache fetches the updated data.
264 for (p
= rw
& -dcache_lsize
; p
< rw
+ len
; p
+= dcache_lsize
) {
265 asm volatile("dc\tcvau, %0" : : "r" (p
) : "memory");
267 asm volatile("dsb\tish" : : : "memory");
271 * If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point
272 * of Unification is not required for instruction to data coherence.
274 if (!(ctr_el0
& CTR_DIC
)) {
275 for (p
= rx
& -icache_lsize
; p
< rx
+ len
; p
+= icache_lsize
) {
276 asm volatile("ic\tivau, %0" : : "r"(p
) : "memory");
278 asm volatile ("dsb\tish" : : : "memory");
281 asm volatile("isb" : : : "memory");
283 #endif /* CONFIG_DARWIN */
285 #elif defined(__mips__)
288 #include <machine/sysarch.h>
290 #include <sys/cachectl.h>
293 void flush_idcache_range(uintptr_t rx
, uintptr_t rw
, size_t len
)
296 cacheflush((void *)rw
, len
, DCACHE
);
298 cacheflush((void *)rx
, len
, ICACHE
);
301 #elif defined(__powerpc__)
303 void flush_idcache_range(uintptr_t rx
, uintptr_t rw
, size_t len
)
309 * Some processors have coherent caches and support a simplified
310 * flushing procedure. See
311 * POWER9 UM, 4.6.2.2 Instruction Cache Block Invalidate (icbi)
312 * https://ibm.ent.box.com/s/tmklq90ze7aj8f4n32er1mu3sy9u8k3k
314 if (have_coherent_icache
) {
315 asm volatile ("sync\n\t"
318 : : "r"(rx
) : "memory");
322 dsize
= qemu_dcache_linesize
;
323 isize
= qemu_icache_linesize
;
325 b
= rw
& ~(dsize
- 1);
326 e
= (rw
+ len
+ dsize
- 1) & ~(dsize
- 1);
327 for (p
= b
; p
< e
; p
+= dsize
) {
328 asm volatile ("dcbst 0,%0" : : "r"(p
) : "memory");
330 asm volatile ("sync" : : : "memory");
332 b
= rx
& ~(isize
- 1);
333 e
= (rx
+ len
+ isize
- 1) & ~(isize
- 1);
334 for (p
= b
; p
< e
; p
+= isize
) {
335 asm volatile ("icbi 0,%0" : : "r"(p
) : "memory");
337 asm volatile ("sync" : : : "memory");
338 asm volatile ("isync" : : : "memory");
341 #elif defined(__sparc__)
343 void flush_idcache_range(uintptr_t rx
, uintptr_t rw
, size_t len
)
345 /* No additional data flush to the RW virtual address required. */
346 uintptr_t p
, end
= (rx
+ len
+ 7) & -8;
347 for (p
= rx
& -8; p
< end
; p
+= 8) {
348 __asm__
__volatile__("flush\t%0" : : "r" (p
));
/*
 * Generic fallback: let the compiler runtime flush both mappings.  The
 * writable alias needs clearing only when it differs from the
 * executable one.
 */
void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
{
    if (rw != rx) {
        __builtin___clear_cache((char *)rw, (char *)rw + len);
    }
    __builtin___clear_cache((char *)rx, (char *)rx + len);
}