/* Initialize x86 cache info.
   Copyright (C) 2020-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
static const struct intel_02_cache_info
{
  unsigned char idx;
  unsigned char assoc;
  unsigned char linesize;
  unsigned char rel_name;
  unsigned int size;
} intel_02_known [] =
  {
#define M(sc) ((sc) - _SC_LEVEL1_ICACHE_SIZE)
    { 0x06,  4, 32, M(_SC_LEVEL1_ICACHE_SIZE),     8192 },
    { 0x08,  4, 32, M(_SC_LEVEL1_ICACHE_SIZE),    16384 },
    { 0x09,  4, 32, M(_SC_LEVEL1_ICACHE_SIZE),    32768 },
    { 0x0a,  2, 32, M(_SC_LEVEL1_DCACHE_SIZE),     8192 },
    { 0x0c,  4, 32, M(_SC_LEVEL1_DCACHE_SIZE),    16384 },
    { 0x0d,  4, 64, M(_SC_LEVEL1_DCACHE_SIZE),    16384 },
    { 0x0e,  6, 64, M(_SC_LEVEL1_DCACHE_SIZE),    24576 },
    { 0x21,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),    262144 },
    { 0x22,  4, 64, M(_SC_LEVEL3_CACHE_SIZE),    524288 },
    { 0x23,  8, 64, M(_SC_LEVEL3_CACHE_SIZE),   1048576 },
    { 0x25,  8, 64, M(_SC_LEVEL3_CACHE_SIZE),   2097152 },
    { 0x29,  8, 64, M(_SC_LEVEL3_CACHE_SIZE),   4194304 },
    { 0x2c,  8, 64, M(_SC_LEVEL1_DCACHE_SIZE),    32768 },
    { 0x30,  8, 64, M(_SC_LEVEL1_ICACHE_SIZE),    32768 },
    { 0x39,  4, 64, M(_SC_LEVEL2_CACHE_SIZE),    131072 },
    { 0x3a,  6, 64, M(_SC_LEVEL2_CACHE_SIZE),    196608 },
    { 0x3b,  2, 64, M(_SC_LEVEL2_CACHE_SIZE),    131072 },
    { 0x3c,  4, 64, M(_SC_LEVEL2_CACHE_SIZE),    262144 },
    { 0x3d,  6, 64, M(_SC_LEVEL2_CACHE_SIZE),    393216 },
    { 0x3e,  4, 64, M(_SC_LEVEL2_CACHE_SIZE),    524288 },
    { 0x3f,  2, 64, M(_SC_LEVEL2_CACHE_SIZE),    262144 },
    { 0x41,  4, 32, M(_SC_LEVEL2_CACHE_SIZE),    131072 },
    { 0x42,  4, 32, M(_SC_LEVEL2_CACHE_SIZE),    262144 },
    { 0x43,  4, 32, M(_SC_LEVEL2_CACHE_SIZE),    524288 },
    { 0x44,  4, 32, M(_SC_LEVEL2_CACHE_SIZE),   1048576 },
    { 0x45,  4, 32, M(_SC_LEVEL2_CACHE_SIZE),   2097152 },
    { 0x46,  4, 64, M(_SC_LEVEL3_CACHE_SIZE),   4194304 },
    { 0x47,  8, 64, M(_SC_LEVEL3_CACHE_SIZE),   8388608 },
    { 0x48, 12, 64, M(_SC_LEVEL2_CACHE_SIZE),   3145728 },
    { 0x49, 16, 64, M(_SC_LEVEL2_CACHE_SIZE),   4194304 },
    { 0x4a, 12, 64, M(_SC_LEVEL3_CACHE_SIZE),   6291456 },
    { 0x4b, 16, 64, M(_SC_LEVEL3_CACHE_SIZE),   8388608 },
    { 0x4c, 12, 64, M(_SC_LEVEL3_CACHE_SIZE),  12582912 },
    { 0x4d, 16, 64, M(_SC_LEVEL3_CACHE_SIZE),  16777216 },
    { 0x4e, 24, 64, M(_SC_LEVEL2_CACHE_SIZE),   6291456 },
    { 0x60,  8, 64, M(_SC_LEVEL1_DCACHE_SIZE),    16384 },
    { 0x66,  4, 64, M(_SC_LEVEL1_DCACHE_SIZE),     8192 },
    { 0x67,  4, 64, M(_SC_LEVEL1_DCACHE_SIZE),    16384 },
    { 0x68,  4, 64, M(_SC_LEVEL1_DCACHE_SIZE),    32768 },
    { 0x78,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),   1048576 },
    { 0x79,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),    131072 },
    { 0x7a,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),    262144 },
    { 0x7b,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),    524288 },
    { 0x7c,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),   1048576 },
    { 0x7d,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),   2097152 },
    { 0x7f,  2, 64, M(_SC_LEVEL2_CACHE_SIZE),    524288 },
    { 0x80,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),    524288 },
    { 0x82,  8, 32, M(_SC_LEVEL2_CACHE_SIZE),    262144 },
    { 0x83,  8, 32, M(_SC_LEVEL2_CACHE_SIZE),    524288 },
    { 0x84,  8, 32, M(_SC_LEVEL2_CACHE_SIZE),   1048576 },
    { 0x85,  8, 32, M(_SC_LEVEL2_CACHE_SIZE),   2097152 },
    { 0x86,  4, 64, M(_SC_LEVEL2_CACHE_SIZE),    524288 },
    { 0x87,  8, 64, M(_SC_LEVEL2_CACHE_SIZE),   1048576 },
    { 0xd0,  4, 64, M(_SC_LEVEL3_CACHE_SIZE),    524288 },
    { 0xd1,  4, 64, M(_SC_LEVEL3_CACHE_SIZE),   1048576 },
    { 0xd2,  4, 64, M(_SC_LEVEL3_CACHE_SIZE),   2097152 },
    { 0xd6,  8, 64, M(_SC_LEVEL3_CACHE_SIZE),   1048576 },
    { 0xd7,  8, 64, M(_SC_LEVEL3_CACHE_SIZE),   2097152 },
    { 0xd8,  8, 64, M(_SC_LEVEL3_CACHE_SIZE),   4194304 },
    { 0xdc, 12, 64, M(_SC_LEVEL3_CACHE_SIZE),   2097152 },
    { 0xdd, 12, 64, M(_SC_LEVEL3_CACHE_SIZE),   4194304 },
    { 0xde, 12, 64, M(_SC_LEVEL3_CACHE_SIZE),   8388608 },
    { 0xe2, 16, 64, M(_SC_LEVEL3_CACHE_SIZE),   2097152 },
    { 0xe3, 16, 64, M(_SC_LEVEL3_CACHE_SIZE),   4194304 },
    { 0xe4, 16, 64, M(_SC_LEVEL3_CACHE_SIZE),   8388608 },
    { 0xea, 24, 64, M(_SC_LEVEL3_CACHE_SIZE),  12582912 },
    { 0xeb, 24, 64, M(_SC_LEVEL3_CACHE_SIZE),  18874368 },
    { 0xec, 24, 64, M(_SC_LEVEL3_CACHE_SIZE),  25165824 },
  };
#define nintel_02_known (sizeof (intel_02_known) / sizeof (intel_02_known [0]))
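/* The table above is sorted by ascending idx (the descriptor byte
   returned by CPUID leaf 2) so that intel_check_word can look up
   entries with bsearch using the comparison function below.  */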
static int
intel_02_known_compare (const void *p1, const void *p2)
{
  const struct intel_02_cache_info *i1;
  const struct intel_02_cache_info *i2;

  i1 = (const struct intel_02_cache_info *) p1;
  i2 = (const struct intel_02_cache_info *) p2;

  if (i1->idx == i2->idx)
    return 0;

  return i1->idx < i2->idx ? -1 : 1;
}
static long int
__attribute__ ((noinline))
intel_check_word (int name, unsigned int value, bool *has_level_2,
                  bool *no_level_2_or_3,
                  const struct cpu_features *cpu_features)
{
  if ((value & 0x80000000) != 0)
    /* The register value is reserved.  */
    return 0;

  /* Fold the name.  The _SC_ constants are always in the order SIZE,
     ASSOC, LINESIZE.  */
  int folded_rel_name = (M(name) / 3) * 3;

  while (value != 0)
    {
      unsigned int byte = value & 0xff;

      if (byte == 0x40)
        {
          *no_level_2_or_3 = true;

          if (folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
            /* No need to look further.  */
            break;
        }
      else if (byte == 0xff)
        {
          /* CPUID leaf 0x4 contains all the information.  We need to
             iterate over it.  */
          unsigned int eax;
          unsigned int ebx;
          unsigned int ecx;
          unsigned int edx;
          unsigned int round = 0;

          while (1)
            {
              __cpuid_count (4, round, eax, ebx, ecx, edx);

              enum { null = 0, data = 1, inst = 2, uni = 3 } type
                = eax & 0x1f;
              if (type == null)
                /* That was the end.  */
                break;

              unsigned int level = (eax >> 5) & 0x7;

              if ((level == 1 && type == data
                   && folded_rel_name == M(_SC_LEVEL1_DCACHE_SIZE))
                  || (level == 1 && type == inst
                      && folded_rel_name == M(_SC_LEVEL1_ICACHE_SIZE))
                  || (level == 2
                      && folded_rel_name == M(_SC_LEVEL2_CACHE_SIZE))
                  || (level == 3
                      && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
                  || (level == 4
                      && folded_rel_name == M(_SC_LEVEL4_CACHE_SIZE)))
                {
                  unsigned int offset = M(name) - folded_rel_name;

                  if (offset == 0)
                    /* Cache size.  */
                    return (((ebx >> 22) + 1)
                            * (((ebx >> 12) & 0x3ff) + 1)
                            * ((ebx & 0xfff) + 1)
                            * (ecx + 1));
                  if (offset == 1)
                    return (ebx >> 22) + 1;

                  assert (offset == 2);
                  return (ebx & 0xfff) + 1;
                }

              ++round;
            }
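          /* The leaf 4 fields decode as EBX[31:22] = ways - 1,
             EBX[21:12] = physical line partitions - 1,
             EBX[11:0] = line size - 1 and ECX = sets - 1, so the
             product above is ways * partitions * line size * sets.
             For instance, an 8-way cache with one partition, 64-byte
             lines and 64 sets reports EBX = 0x01c0003f and ECX = 63:
             8 * 1 * 64 * 64 = 32768 bytes (32 KiB).  */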
          /* There is no other cache information anywhere else.  */
          break;
        }
      else
        {
          if (byte == 0x49 && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE))
            {
              /* Intel reused this value.  For family 15, model 6 it
                 specifies the 3rd level cache.  Otherwise the 2nd
                 level cache.  */
              unsigned int family = cpu_features->basic.family;
              unsigned int model = cpu_features->basic.model;

              if (family == 15 && model == 6)
                {
                  /* The level 3 cache is encoded for this model like
                     the level 2 cache is for other models.  Pretend
                     the caller asked for the level 2 cache.  */
                  name = (_SC_LEVEL2_CACHE_SIZE
                          + (name - _SC_LEVEL3_CACHE_SIZE));
                  folded_rel_name = M(_SC_LEVEL2_CACHE_SIZE);
                }
            }
          struct intel_02_cache_info *found;
          struct intel_02_cache_info search;

          search.idx = byte;
          found = bsearch (&search, intel_02_known, nintel_02_known,
                           sizeof (intel_02_known[0]),
                           intel_02_known_compare);
          if (found != NULL)
            {
              if (found->rel_name == folded_rel_name)
                {
                  unsigned int offset = M(name) - folded_rel_name;

                  if (offset == 0)
                    /* Cache size.  */
                    return found->size;
                  if (offset == 1)
                    return found->assoc;

                  assert (offset == 2);
                  return found->linesize;
                }

              if (found->rel_name == M(_SC_LEVEL2_CACHE_SIZE))
                *has_level_2 = true;
            }
        }
      /* Next byte for the next round.  */
      value >>= 8;
    }

  /* Nothing found.  */
  return 0;
}
static long int __attribute__ ((noinline))
handle_intel (int name, const struct cpu_features *cpu_features)
{
  unsigned int maxidx = cpu_features->basic.max_cpuid;
  /* Return -1 for older CPUs.  */
  if (maxidx < 2)
    return -1;

  /* OK, we can use the CPUID instruction to get all info about the
     caches.  */
  long int result = 0;
  bool no_level_2_or_3 = false;
  bool has_level_2 = false;
  unsigned int eax;
  unsigned int ebx;
  unsigned int ecx;
  unsigned int edx;

  __cpuid (2, eax, ebx, ecx, edx);
  /* The low byte of EAX of CPUID leaf 2 should always return 1 and it
     should be ignored.  If it isn't 1, use CPUID leaf 4 instead.  */
  if ((eax & 0xff) != 1)
    return intel_check_word (name, 0xff, &has_level_2, &no_level_2_or_3,
                             cpu_features);
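  /* Passing 0xff as VALUE steers intel_check_word into its byte == 0xff
     branch, which enumerates CPUID leaf 4 directly instead of decoding
     leaf 2 descriptor bytes.  */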
  /* Mask out the low byte.  */
  eax &= 0xffffff00;

  /* Process the individual registers' value.  */
  result = intel_check_word (name, eax, &has_level_2,
                             &no_level_2_or_3, cpu_features);
  if (result != 0)
    return result;

  result = intel_check_word (name, ebx, &has_level_2,
                             &no_level_2_or_3, cpu_features);
  if (result != 0)
    return result;

  result = intel_check_word (name, ecx, &has_level_2,
                             &no_level_2_or_3, cpu_features);
  if (result != 0)
    return result;

  result = intel_check_word (name, edx, &has_level_2,
                             &no_level_2_or_3, cpu_features);
  if (result != 0)
    return result;
  if (name >= _SC_LEVEL2_CACHE_SIZE && name <= _SC_LEVEL3_CACHE_LINESIZE
      && no_level_2_or_3)
    return -1;

  return 0;
}
static long int __attribute__ ((noinline))
handle_amd (int name)
{
  unsigned int eax;
  unsigned int ebx;
  unsigned int ecx = 0;
  unsigned int edx;
  unsigned int max_cpuid = 0;
  unsigned int fn = 0;
  /* No level 4 cache (yet).  */
  if (name > _SC_LEVEL3_CACHE_LINESIZE)
    return -1;
  __cpuid (0x80000000, max_cpuid, ebx, ecx, edx);
  if (max_cpuid >= 0x8000001D)
    /* Use CPUID leaf 0x8000001D to compute cache details.  */
    {
      unsigned int count = 0x1;
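      /* The subleaf in ECX selects the cache: 0 = L1 data,
         1 = L1 instruction, 2 = L2, 3 = L3.  The default of 0x1
         covers the L1 instruction cache names, which sort below
         _SC_LEVEL1_DCACHE_SIZE.  */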
      if (name >= _SC_LEVEL3_CACHE_SIZE)
        count = 0x3;
      else if (name >= _SC_LEVEL2_CACHE_SIZE)
        count = 0x2;
      else if (name >= _SC_LEVEL1_DCACHE_SIZE)
        count = 0x0;

      __cpuid_count (0x8000001D, count, eax, ebx, ecx, edx);
      if (ecx != 0)
        {
          switch (name)
            {
            case _SC_LEVEL1_ICACHE_ASSOC:
            case _SC_LEVEL1_DCACHE_ASSOC:
            case _SC_LEVEL2_CACHE_ASSOC:
            case _SC_LEVEL3_CACHE_ASSOC:
              return ((ebx >> 22) & 0x3ff) + 1;
            case _SC_LEVEL1_ICACHE_LINESIZE:
            case _SC_LEVEL1_DCACHE_LINESIZE:
            case _SC_LEVEL2_CACHE_LINESIZE:
            case _SC_LEVEL3_CACHE_LINESIZE:
              return (ebx & 0xfff) + 1;
            case _SC_LEVEL1_ICACHE_SIZE:
            case _SC_LEVEL1_DCACHE_SIZE:
            case _SC_LEVEL2_CACHE_SIZE:
            case _SC_LEVEL3_CACHE_SIZE:
              return (((ebx >> 22) & 0x3ff) + 1) * ((ebx & 0xfff) + 1)
                     * (ecx + 1);
            default:
              __builtin_unreachable ();
            }
        }
    }
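  /* In the SIZE cases above the product is ways * line size * sets.
     For instance, a hypothetical 8-way L2 with 64-byte lines and 1024
     sets reports EBX[31:22] = 7, EBX[11:0] = 63 and ECX = 1023:
     8 * 64 * 1024 = 524288 bytes (512 KiB).  */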
  /* Legacy cache computation for CPUs prior to the Bulldozer family.
     This is also a fail-safe mechanism for some hypervisors that
     accidentally configure CPUID leaf 0x8000001D to zero.  */

  fn = 0x80000005 + (name >= _SC_LEVEL2_CACHE_SIZE);

  if (max_cpuid < fn)
    return 0;
  __cpuid (fn, eax, ebx, ecx, edx);
  if (name < _SC_LEVEL1_DCACHE_SIZE)
    {
      name += _SC_LEVEL1_DCACHE_SIZE - _SC_LEVEL1_ICACHE_SIZE;
      ecx = edx;
    }

  switch (name)
    {
    case _SC_LEVEL1_DCACHE_SIZE:
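      /* CPUID leaf 0x80000005 reports the L1 data cache size in KiB in
         ECX[31:24]; (ecx >> 14) & 0x3fc00 is equivalent to
         ((ecx >> 24) << 10), i.e. the size converted to bytes.  */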
      return (ecx >> 14) & 0x3fc00;
    case _SC_LEVEL1_DCACHE_ASSOC:
      ecx >>= 16;
      if ((ecx & 0xff) == 0xff)
        /* Fully associative.  */
        return (ecx << 2) & 0x3fc00;
      return ecx & 0xff;
    case _SC_LEVEL1_DCACHE_LINESIZE:
      return ecx & 0xff;
    case _SC_LEVEL2_CACHE_SIZE:
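      /* CPUID leaf 0x80000006 reports the L2 size in KiB in ECX[31:16]
         and the associativity code in ECX[15:12]; a code of 0 means no
         L2 cache.  (ecx >> 6) & 0x3fffc00 is ((ecx >> 16) << 10), the
         size in bytes.  */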
      return (ecx & 0xf000) == 0 ? 0 : (ecx >> 6) & 0x3fffc00;
    case _SC_LEVEL2_CACHE_ASSOC:
      switch ((ecx >> 12) & 0xf)
        {
        case 0:
        case 1:
        case 2:
        case 4:
          return (ecx >> 12) & 0xf;
        case 6:
          return 8;
        case 8:
          return 16;
        case 10:
          return 32;
        case 11:
          return 48;
        case 12:
          return 64;
        case 13:
          return 96;
        case 14:
          return 128;
        case 15:
          /* Fully associative.  */
          return ((ecx >> 6) & 0x3fffc00) / (ecx & 0xff);
        default:
          return 0;
        }
    case _SC_LEVEL2_CACHE_LINESIZE:
      return (ecx & 0xf000) == 0 ? 0 : ecx & 0xff;
    case _SC_LEVEL3_CACHE_SIZE:
      {
        long int total_l3_cache = 0, l3_cache_per_thread = 0;
        unsigned int threads = 0;
        const struct cpu_features *cpu_features;
        if ((edx & 0xf000) == 0)
          return 0;

        total_l3_cache = (edx & 0x3ffc0000) << 1;
        cpu_features = __get_cpu_features ();
        /* Figure out the number of logical threads that share L3.  */
        if (max_cpuid >= 0x80000008)
          {
            /* Get width of APIC ID.  */
            __cpuid (0x80000008, eax, ebx, ecx, edx);
            threads = (ecx & 0xff) + 1;
          }
        if (threads == 0)
          {
            /* If APIC ID width is not available, use logical
               processor count.  */
            __cpuid (0x00000001, eax, ebx, ecx, edx);
            if ((edx & (1 << 28)) != 0)
              threads = (ebx >> 16) & 0xff;
          }
        /* Cap usage of highest cache level to the number of
           supported threads.  */
        if (threads > 0)
          l3_cache_per_thread = total_l3_cache / threads;
        /* Get the shared cache per CCX for Zen architectures.  */
        if (cpu_features->basic.family >= 0x17)
          {
            long int l3_cache_per_ccx = 0;
            /* Get the number of threads that share the L3 cache in
               a CCX.  */
            __cpuid_count (0x8000001D, 0x3, eax, ebx, ecx, edx);
            unsigned int threads_per_ccx = ((eax >> 14) & 0xfff) + 1;
            l3_cache_per_ccx = l3_cache_per_thread * threads_per_ccx;
            return l3_cache_per_ccx;
          }
        else
          return l3_cache_per_thread;
      }
    case _SC_LEVEL3_CACHE_ASSOC:
      switch ((edx >> 12) & 0xf)
        {
        case 0:
        case 1:
        case 2:
        case 4:
          return (edx >> 12) & 0xf;
        case 6:
          return 8;
        case 8:
          return 16;
        case 10:
          return 32;
        case 11:
          return 48;
        case 12:
          return 64;
        case 13:
          return 96;
        case 14:
          return 128;
        case 15:
          /* Fully associative.  */
          return ((edx & 0x3ffc0000) << 1) / (edx & 0xff);
        default:
          return 0;
        }
    case _SC_LEVEL3_CACHE_LINESIZE:
      return (edx & 0xf000) == 0 ? 0 : edx & 0xff;

    default:
      __builtin_unreachable ();
    }
  return -1;
}
static long int __attribute__ ((noinline))
handle_zhaoxin (int name)
{
  unsigned int eax;
  unsigned int ebx;
  unsigned int ecx;
  unsigned int edx;

  int folded_rel_name = (M(name) / 3) * 3;

  unsigned int round = 0;
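  /* Zhaoxin CPUs implement the same deterministic cache parameters
     leaf (CPUID leaf 4) as Intel, so the loop below mirrors the
     byte == 0xff path of intel_check_word.  */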
  while (1)
    {
      __cpuid_count (4, round, eax, ebx, ecx, edx);
      enum { null = 0, data = 1, inst = 2, uni = 3 } type = eax & 0x1f;
      if (type == null)
        break;

      unsigned int level = (eax >> 5) & 0x7;
      if ((level == 1 && type == data
           && folded_rel_name == M(_SC_LEVEL1_DCACHE_SIZE))
          || (level == 1 && type == inst
              && folded_rel_name == M(_SC_LEVEL1_ICACHE_SIZE))
          || (level == 2 && folded_rel_name == M(_SC_LEVEL2_CACHE_SIZE))
          || (level == 3 && folded_rel_name == M(_SC_LEVEL3_CACHE_SIZE)))
        {
          unsigned int offset = M(name) - folded_rel_name;
          if (offset == 0)
            /* Cache size.  */
            return (((ebx >> 22) + 1)
                    * (((ebx >> 12) & 0x3ff) + 1)
                    * ((ebx & 0xfff) + 1)
                    * (ecx + 1));
          if (offset == 1)
            return (ebx >> 22) + 1;

          assert (offset == 2);
          return (ebx & 0xfff) + 1;
        }

      ++round;
    }

  /* Nothing found.  */
  return 0;
}
static void
get_common_cache_info (long int *shared_ptr,
                       long int *shared_per_thread_ptr,
                       unsigned int *threads_ptr, long int core)
{
  unsigned int eax;
  unsigned int ebx;
  unsigned int ecx;
  unsigned int edx;
  /* Number of logical processors sharing L2 cache.  */
  int threads_l2;

  /* Number of logical processors sharing L3 cache.  */
  int threads_l3;
  const struct cpu_features *cpu_features = __get_cpu_features ();
  int max_cpuid = cpu_features->basic.max_cpuid;
  unsigned int family = cpu_features->basic.family;
  unsigned int model = cpu_features->basic.model;
  long int shared = *shared_ptr;
  long int shared_per_thread = *shared_per_thread_ptr;
  unsigned int threads = *threads_ptr;
  bool inclusive_cache = true;
  bool support_count_mask = true;

  /* Try L3 first.  */
  unsigned int level = 3;
  if (cpu_features->basic.kind == arch_kind_zhaoxin && family == 6)
    support_count_mask = false;
  if (shared <= 0)
    {
      /* Try L2 otherwise.  */
      level = 2;
      shared = core;
      shared_per_thread = core;
      threads_l2 = 0;
      threads_l3 = -1;
    }
  else
    {
      threads_l2 = 0;
      threads_l3 = 0;
    }
  /* A value of 0 for the HTT bit indicates there is only a single
     logical processor.  */
  if (HAS_CPU_FEATURE (HTT))
    {
      /* Figure out the number of logical threads that share the
         highest cache level.  */
      if (max_cpuid >= 4)
        {
          int i = 0;
          /* Query until cache level 2 and 3 are enumerated.  */
          int check = 0x1 | (threads_l3 == 0) << 1;
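          /* Bit 0 of CHECK requests the L2 parameters; bit 1 is set
             only when an L3 cache was found above (threads_l3 == 0),
             so the loop also waits for the L3 leaf.  */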
          do
            {
              __cpuid_count (4, i++, eax, ebx, ecx, edx);
              /* There seems to be a bug in at least some Pentium Ds
                 which sometimes fail to iterate all cache parameters.
                 Do not loop indefinitely here, stop in this case and
                 assume there is no such information.  */
              if (cpu_features->basic.kind == arch_kind_intel
                  && (eax & 0x1f) == 0)
                goto intel_bug_no_cache_info;
              switch ((eax >> 5) & 0x7)
                {
                default:
                  break;
                case 2:
                  if ((check & 0x1))
                    {
                      /* Get maximum number of logical processors
                         sharing L2 cache.  */
                      threads_l2 = (eax >> 14) & 0x3ff;
                      check &= ~0x1;
                    }
                  break;
                case 3:
                  if ((check & (0x1 << 1)))
                    {
                      /* Get maximum number of logical processors
                         sharing L3 cache.  */
                      threads_l3 = (eax >> 14) & 0x3ff;

                      /* Check if L2 and L3 caches are inclusive.  */
                      inclusive_cache = (edx & 0x2) != 0;
                      check &= ~(0x1 << 1);
                    }
                  break;
                }
            }
          while (check);
          /* If max_cpuid >= 11, THREADS_L2/THREADS_L3 are the maximum
             numbers of addressable IDs for logical processors sharing
             the cache, instead of the maximum number of threads
             sharing the cache.  */
          if (max_cpuid >= 11 && support_count_mask)
            {
              /* Find the number of logical processors shipped in
                 one core and apply count mask.  */
              int i = 0;

              /* Count SMT only if there is L3 cache.  Always count
                 core if there is no L3 cache.  */
              int count = ((threads_l2 > 0 && level == 3)
                           | ((threads_l3 > 0
                               || (threads_l2 > 0 && level == 2)) << 1));

              while (count)
                {
                  __cpuid_count (11, i++, eax, ebx, ecx, edx);
                  int shipped = ebx & 0xff;
                  int type = ecx & 0xff00;
                  if (shipped == 0 || type == 0)
                    break;
                  else if (type == 0x100)
                    {
                      /* Count SMT.  */
                      if ((count & 0x1))
                        {
                          int count_mask;
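                          /* BSR yields the index of the highest set
                             bit in the maximum-ID value, i.e. the
                             width of the ID field; ~(-1 << (width + 1))
                             builds a mask of that many low bits.  For
                             example, a maximum ID of 3 gives BSR = 1
                             and mask 0x3.  */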
                          /* Compute count mask.  */
                          asm ("bsr %1, %0"
                               : "=r" (count_mask) : "g" (threads_l2));
                          count_mask = ~(-1 << (count_mask + 1));
                          threads_l2 = (shipped - 1) & count_mask;
                          count &= ~0x1;
                        }
                    }
                  else if (type == 0x200)
                    {
                      /* Count core.  */
                      if ((count & (0x1 << 1)))
                        {
                          int count_mask;
                          int threads_core
                            = (level == 2 ? threads_l2 : threads_l3);

                          /* Compute count mask.  */
                          asm ("bsr %1, %0"
                               : "=r" (count_mask) : "g" (threads_core));
                          count_mask = ~(-1 << (count_mask + 1));
                          threads_core = (shipped - 1) & count_mask;
                          if (level == 2)
                            threads_l2 = threads_core;
                          else
                            threads_l3 = threads_core;
                          count &= ~(0x1 << 1);
                        }
                    }
                }
            }
          if (threads_l2 > 0)
            threads_l2 += 1;
          if (threads_l3 > 0)
            threads_l3 += 1;

          if (level == 2)
            {
              if (threads_l2)
                {
                  threads = threads_l2;
                  if (cpu_features->basic.kind == arch_kind_intel
                      && threads > 2
                      && family == 6)
                    switch (model)
                      {
                      case 0x37:
                      case 0x4a:
                      case 0x4d:
                      case 0x5a:
                      case 0x5d:
                        /* Silvermont has L2 cache shared by 2 cores.  */
                        threads = 2;
                        break;
                      default:
                        break;
                      }
                }
            }
          else if (threads_l3)
            threads = threads_l3;
        }
      else
        {
        intel_bug_no_cache_info:
          /* Assume that all logical threads share the highest cache
             level.  */
          threads = ((cpu_features->features[CPUID_INDEX_1].cpuid.ebx >> 16)
                     & 0xff);
        }
      /* Get per-thread size of highest level cache.  */
      if (shared_per_thread > 0 && threads > 0)
        shared_per_thread /= threads;
    }
  /* Account for non-inclusive L2 and L3 caches.  */
  if (!inclusive_cache)
    {
      long int core_per_thread
        = threads_l2 > 0 ? (core / threads_l2) : core;
      shared_per_thread += core_per_thread;
      shared += core;
    }
  *shared_ptr = shared;
  *shared_per_thread_ptr = shared_per_thread;
  *threads_ptr = threads;
}
static void
dl_init_cacheinfo (struct cpu_features *cpu_features)
{
  /* Find out what brand of processor.  */
  long int data = -1;
  long int shared = -1;
  long int shared_per_thread = -1;
  long int core = -1;
  unsigned int threads = 0;
  unsigned long int level1_icache_size = -1;
  unsigned long int level1_icache_linesize = -1;
  unsigned long int level1_dcache_size = -1;
  unsigned long int level1_dcache_assoc = -1;
  unsigned long int level1_dcache_linesize = -1;
  unsigned long int level2_cache_size = -1;
  unsigned long int level2_cache_assoc = -1;
  unsigned long int level2_cache_linesize = -1;
  unsigned long int level3_cache_size = -1;
  unsigned long int level3_cache_assoc = -1;
  unsigned long int level3_cache_linesize = -1;
  unsigned long int level4_cache_size = -1;
  if (cpu_features->basic.kind == arch_kind_intel)
    {
      data = handle_intel (_SC_LEVEL1_DCACHE_SIZE, cpu_features);
      core = handle_intel (_SC_LEVEL2_CACHE_SIZE, cpu_features);
      shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, cpu_features);
      shared_per_thread = shared;

      level1_icache_size
        = handle_intel (_SC_LEVEL1_ICACHE_SIZE, cpu_features);
      level1_icache_linesize
        = handle_intel (_SC_LEVEL1_ICACHE_LINESIZE, cpu_features);
      level1_dcache_size = data;
      level1_dcache_assoc
        = handle_intel (_SC_LEVEL1_DCACHE_ASSOC, cpu_features);
      level1_dcache_linesize
        = handle_intel (_SC_LEVEL1_DCACHE_LINESIZE, cpu_features);
      level2_cache_size = core;
      level2_cache_assoc
        = handle_intel (_SC_LEVEL2_CACHE_ASSOC, cpu_features);
      level2_cache_linesize
        = handle_intel (_SC_LEVEL2_CACHE_LINESIZE, cpu_features);
      level3_cache_size = shared;
      level3_cache_assoc
        = handle_intel (_SC_LEVEL3_CACHE_ASSOC, cpu_features);
      level3_cache_linesize
        = handle_intel (_SC_LEVEL3_CACHE_LINESIZE, cpu_features);
      level4_cache_size
        = handle_intel (_SC_LEVEL4_CACHE_SIZE, cpu_features);

      get_common_cache_info (&shared, &shared_per_thread, &threads, core);
    }
  else if (cpu_features->basic.kind == arch_kind_zhaoxin)
    {
      data = handle_zhaoxin (_SC_LEVEL1_DCACHE_SIZE);
      core = handle_zhaoxin (_SC_LEVEL2_CACHE_SIZE);
      shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE);
      shared_per_thread = shared;

      level1_icache_size = handle_zhaoxin (_SC_LEVEL1_ICACHE_SIZE);
      level1_icache_linesize = handle_zhaoxin (_SC_LEVEL1_ICACHE_LINESIZE);
      level1_dcache_size = data;
      level1_dcache_assoc = handle_zhaoxin (_SC_LEVEL1_DCACHE_ASSOC);
      level1_dcache_linesize = handle_zhaoxin (_SC_LEVEL1_DCACHE_LINESIZE);
      level2_cache_size = core;
      level2_cache_assoc = handle_zhaoxin (_SC_LEVEL2_CACHE_ASSOC);
      level2_cache_linesize = handle_zhaoxin (_SC_LEVEL2_CACHE_LINESIZE);
      level3_cache_size = shared;
      level3_cache_assoc = handle_zhaoxin (_SC_LEVEL3_CACHE_ASSOC);
      level3_cache_linesize = handle_zhaoxin (_SC_LEVEL3_CACHE_LINESIZE);

      get_common_cache_info (&shared, &shared_per_thread, &threads, core);
    }
  else if (cpu_features->basic.kind == arch_kind_amd)
    {
      data = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
      core = handle_amd (_SC_LEVEL2_CACHE_SIZE);
      shared = handle_amd (_SC_LEVEL3_CACHE_SIZE);

      level1_icache_size = handle_amd (_SC_LEVEL1_ICACHE_SIZE);
      level1_icache_linesize = handle_amd (_SC_LEVEL1_ICACHE_LINESIZE);
      level1_dcache_size = data;
      level1_dcache_assoc = handle_amd (_SC_LEVEL1_DCACHE_ASSOC);
      level1_dcache_linesize = handle_amd (_SC_LEVEL1_DCACHE_LINESIZE);
      level2_cache_size = core;
      level2_cache_assoc = handle_amd (_SC_LEVEL2_CACHE_ASSOC);
      level2_cache_linesize = handle_amd (_SC_LEVEL2_CACHE_LINESIZE);
      level3_cache_size = shared;
      level3_cache_assoc = handle_amd (_SC_LEVEL3_CACHE_ASSOC);
      level3_cache_linesize = handle_amd (_SC_LEVEL3_CACHE_LINESIZE);
      level4_cache_size = handle_amd (_SC_LEVEL4_CACHE_SIZE);
      if (shared <= 0)
        /* No shared L3 cache.  All we have is the L2 cache.  */
        shared = core;
      else if (cpu_features->basic.family < 0x17)
        /* Account for exclusive L2 and L3 caches.  */
        shared += core;

      shared_per_thread = shared;
    }
  cpu_features->level1_icache_size = level1_icache_size;
  cpu_features->level1_icache_linesize = level1_icache_linesize;
  cpu_features->level1_dcache_size = level1_dcache_size;
  cpu_features->level1_dcache_assoc = level1_dcache_assoc;
  cpu_features->level1_dcache_linesize = level1_dcache_linesize;
  cpu_features->level2_cache_size = level2_cache_size;
  cpu_features->level2_cache_assoc = level2_cache_assoc;
  cpu_features->level2_cache_linesize = level2_cache_linesize;
  cpu_features->level3_cache_size = level3_cache_size;
  cpu_features->level3_cache_assoc = level3_cache_assoc;
  cpu_features->level3_cache_linesize = level3_cache_linesize;
  cpu_features->level4_cache_size = level4_cache_size;
  unsigned long int cachesize_non_temporal_divisor
    = cpu_features->cachesize_non_temporal_divisor;
  if (cachesize_non_temporal_divisor <= 0)
    cachesize_non_temporal_divisor = 4;
  /* The default setting for the non_temporal threshold is [1/8, 1/2] of the
     size of the chip's cache (depending on `cachesize_non_temporal_divisor`,
     which is microarch specific; the default is 1/4).  For most Intel
     processors with an initial release date between 2017 and 2023, a
     thread's typical share of the cache is from 18-64 MB.  Using a
     reasonable size fraction of L3 is meant to estimate the point where
     non-temporal stores begin to out-compete REP MOVSB, as well as the
     point at which the write-back of non-temporal stores to main memory
     would already have happened for the majority of the lines in the copy.
     Note, concerns about the entire L3 cache being evicted by the copy are
     mostly alleviated by the fact that modern HW detects streaming patterns
     and provides proper LRU hints, so that maximum thrashing is capped at
     1/associativity.  */
  unsigned long int non_temporal_threshold
    = shared / cachesize_non_temporal_divisor;
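  /* For instance, with a 32 MiB shared L3 and the default divisor of
     4, the starting threshold is 8 MiB.  */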
  /* If the computed non_temporal_threshold <= 3/4 * per-thread L3, we most
     likely have incorrect/incomplete cache info in which case, default to
     3/4 * per-thread L3 to avoid regressions.  */
  unsigned long int non_temporal_threshold_lowbound
    = shared_per_thread * 3 / 4;
  if (non_temporal_threshold < non_temporal_threshold_lowbound)
    non_temporal_threshold = non_temporal_threshold_lowbound;
  /* If no ERMS, we use the per-thread L3 chunking.  Normal cacheable stores
     run a higher risk of actually thrashing the cache as they don't have a
     HW LRU hint.  As well, their performance in highly parallel situations
     is noticeably worse.  */
  if (!CPU_FEATURE_USABLE_P (cpu_features, ERMS))
    non_temporal_threshold = non_temporal_threshold_lowbound;
  /* SIZE_MAX >> 4 because memmove-vec-unaligned-erms right-shifts the value
     of 'x86_non_temporal_threshold' by `LOG_4X_MEMCPY_THRESH` (4) and it is
     best if that operation cannot overflow.  Minimum of 0x4040 (16448)
     because the L(large_memset_4x) loops need 64-byte to cache align and
     enough space for at least 1 iteration of 4x PAGE_SIZE unrolled loop.
     Both values are reflected in the manual.  */
  unsigned long int maximum_non_temporal_threshold = SIZE_MAX >> 4;
  unsigned long int minimum_non_temporal_threshold = 0x4040;
  /* If `non_temporal_threshold` is less than
     `minimum_non_temporal_threshold`, it most likely means we failed to
     detect the cache info.  We don't want to default to
     `minimum_non_temporal_threshold` as such a small value, while correct,
     has bad performance.  We default to 64MB as a reasonable default bound.
     64MB is likely conservative in that most/all systems would choose a
     lower value, so it should never force non-temporal stores when they
     otherwise wouldn't be used.  */
  if (non_temporal_threshold < minimum_non_temporal_threshold)
    non_temporal_threshold = 64 * 1024 * 1024;
  else if (non_temporal_threshold > maximum_non_temporal_threshold)
    non_temporal_threshold = maximum_non_temporal_threshold;
  /* NB: The REP MOVSB threshold must be greater than VEC_SIZE * 8.  */
  unsigned int minimum_rep_movsb_threshold;
  /* NB: The default REP MOVSB threshold is 4096 * (VEC_SIZE / 16) for
     VEC_SIZE == 64 or 32.  For VEC_SIZE == 16, the default REP MOVSB
     threshold is 2048 * (VEC_SIZE / 16).  */
  unsigned int rep_movsb_threshold;
  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
      && !CPU_FEATURE_PREFERRED_P (cpu_features, Prefer_No_AVX512))
    {
      rep_movsb_threshold = 4096 * (64 / 16);
      minimum_rep_movsb_threshold = 64 * 8;
    }
  else if (CPU_FEATURE_PREFERRED_P (cpu_features,
                                    AVX_Fast_Unaligned_Load))
    {
      rep_movsb_threshold = 4096 * (32 / 16);
      minimum_rep_movsb_threshold = 32 * 8;
    }
  else
    {
      rep_movsb_threshold = 2048 * (16 / 16);
      minimum_rep_movsb_threshold = 16 * 8;
    }
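  /* With the constants above, the default threshold works out to
     16384 bytes when AVX512 is usable, 8192 with 32-byte AVX loads,
     and 2048 for the 16-byte SSE baseline.  */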
  /* NB: The default REP MOVSB threshold is 2112 on processors with fast
     short REP MOVSB (FSRM).  */
  if (CPU_FEATURE_USABLE_P (cpu_features, FSRM))
    rep_movsb_threshold = 2112;
  /* The default threshold to use Enhanced REP STOSB.  */
  unsigned long int rep_stosb_threshold = 2048;
  long int tunable_size;

  tunable_size = TUNABLE_GET (x86_data_cache_size, long int, NULL);
  /* NB: Ignore the default value 0.  */
  if (tunable_size != 0)
    data = tunable_size;
= TUNABLE_GET (x86_shared_cache_size
, long int, NULL
);
1001 /* NB: Ignore the default value 0. */
1002 if (tunable_size
!= 0)
1003 shared
= tunable_size
;
  tunable_size = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
  if (tunable_size > minimum_non_temporal_threshold
      && tunable_size <= maximum_non_temporal_threshold)
    non_temporal_threshold = tunable_size;
  tunable_size = TUNABLE_GET (x86_rep_movsb_threshold, long int, NULL);
  if (tunable_size > minimum_rep_movsb_threshold)
    rep_movsb_threshold = tunable_size;
  /* NB: The default value of the x86_rep_stosb_threshold tunable is the
     same as the default value of __x86_rep_stosb_threshold and the
     minimum value is fixed.  */
  rep_stosb_threshold = TUNABLE_GET (x86_rep_stosb_threshold,
                                     long int, NULL);
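  /* The tunables read here can also be overridden at process startup,
     e.g. (assuming a hypothetical 16 MiB target):
       GLIBC_TUNABLES=glibc.cpu.x86_non_temporal_threshold=16777216  */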
  TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX);
  TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX);
  TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold,
                           minimum_non_temporal_threshold,
                           maximum_non_temporal_threshold);
  TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold,
                           minimum_rep_movsb_threshold, SIZE_MAX);
  TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1,
                           SIZE_MAX);
  unsigned long int rep_movsb_stop_threshold;
  /* The ERMS feature is implemented from the AMD Zen3 architecture, and it
     performs poorly for data above the L2 cache size.  Hence, add an upper
     bound threshold to limit the use of Enhanced REP MOVSB operations, and
     set its value to the L2 cache size.  */
  if (cpu_features->basic.kind == arch_kind_amd)
    rep_movsb_stop_threshold = core;
  /* Set the upper bound of ERMS to the computed value of the
     non-temporal threshold for architectures other than AMD.  */
  else
    rep_movsb_stop_threshold = non_temporal_threshold;
  cpu_features->data_cache_size = data;
  cpu_features->shared_cache_size = shared;
  cpu_features->non_temporal_threshold = non_temporal_threshold;
  cpu_features->rep_movsb_threshold = rep_movsb_threshold;
  cpu_features->rep_stosb_threshold = rep_stosb_threshold;
  cpu_features->rep_movsb_stop_threshold = rep_movsb_stop_threshold;
}