/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2018 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <cpuid.h>
#include <cpu-features.h>
#include <dl-hwcap.h>
#include <libc-pointer-arith.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE tune
# include <unistd.h>		/* Get STDOUT_FILENO for _dl_printf.  */
# include <elf/dl-tunables.h>

extern void TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *)
  attribute_hidden;
#endif

static void
get_common_indeces (struct cpu_features *cpu_features,
                    unsigned int *family, unsigned int *model,
                    unsigned int *extended_model, unsigned int *stepping)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax, cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].edx);
      cpu_features->cpuid[COMMON_CPUID_INDEX_1].eax = eax;
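      /* CPUID leaf 1 returns the processor signature in EAX: stepping in
         bits 3:0, model in bits 7:4, family in bits 11:8, extended model
         in bits 19:16 and extended family in bits 27:20.  The shift by 12
         below leaves the extended model pre-shifted left by 4, so it can
         be added directly to the 4-bit model.  */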
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      *stepping = eax & 0x0f;
      if (*family == 0x0f)
        {
          *family += (eax >> 20) & 0xff;
          *model += *extended_model;
        }
    }

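  /* CPUID leaf 7 (sub-leaf 0) reports the structured extended feature
     flags, e.g. AVX2, BMI1/BMI2 and the AVX-512 feature bits.  */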
  if (cpu_features->max_cpuid >= 7)
    __cpuid_count (7, 0,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].eax,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ecx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].edx);

  /* Can we call xgetbv?  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
      /* Is YMM and XMM state usable?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state)) ==
          (bit_YMM_state | bit_XMM_state))
        {
          /* Determine if AVX is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX))
            {
              cpu_features->feature[index_arch_AVX_Usable]
                |= bit_arch_AVX_Usable;
              /* The following features depend on AVX being usable.  */
              /* Determine if AVX2 is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
                cpu_features->feature[index_arch_AVX2_Usable]
                  |= bit_arch_AVX2_Usable;
              /* Determine if FMA is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, FMA))
                cpu_features->feature[index_arch_FMA_Usable]
                  |= bit_arch_FMA_Usable;
            }

          /* Check if OPMASK state, upper 256-bit of ZMM0-ZMM15 and
             ZMM16-ZMM31 state are enabled.  */
          if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
                         | bit_ZMM16_31_state)) ==
              (bit_Opmask_state | bit_ZMM0_15_state | bit_ZMM16_31_state))
            {
              /* Determine if AVX512F is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
                {
                  cpu_features->feature[index_arch_AVX512F_Usable]
                    |= bit_arch_AVX512F_Usable;
                  /* Determine if AVX512DQ is usable.  */
                  if (CPU_FEATURES_CPU_P (cpu_features, AVX512DQ))
                    cpu_features->feature[index_arch_AVX512DQ_Usable]
                      |= bit_arch_AVX512DQ_Usable;
                }
            }
        }

      /* For _dl_runtime_resolve, set xsave_state_size to xsave area
         size + integer register save size and align it to 64 bytes.  */
      if (cpu_features->max_cpuid >= 0xd)
        {
          unsigned int eax, ebx, ecx, edx;

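          /* CPUID leaf 0xd, sub-leaf 0: EBX is the size in bytes of the
             XSAVE area needed for the state components currently enabled
             in XCR0.  */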
          __cpuid_count (0xd, 0, eax, ebx, ecx, edx);
          if (ebx != 0)
            {
              unsigned int xsave_state_full_size
                = ALIGN_UP (ebx + STATE_SAVE_OFFSET, 64);

              cpu_features->xsave_state_size
                = xsave_state_full_size;
              cpu_features->xsave_state_full_size
                = xsave_state_full_size;

              __cpuid_count (0xd, 1, eax, ebx, ecx, edx);

              /* Check if XSAVEC is available.  */
              if ((eax & (1 << 1)) != 0)
                {
                  unsigned int xstate_comp_offsets[32];
                  unsigned int xstate_comp_sizes[32];
                  unsigned int i;

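                  /* Seed the compacted-format layout with the fixed
                     legacy area: x87 state occupies the first 160 bytes,
                     SSE/XMM state the next 256 bytes, and extended
                     components start at offset 576 (512-byte legacy area
                     plus the 64-byte XSAVE header).  */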
                  xstate_comp_offsets[0] = 0;
                  xstate_comp_offsets[1] = 160;
                  xstate_comp_offsets[2] = 576;
                  xstate_comp_sizes[0] = 160;
                  xstate_comp_sizes[1] = 256;

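                  /* Query each remaining component selected by
                     STATE_SAVE_MASK: CPUID leaf 0xd, sub-leaf i returns
                     the component's size in EAX, and bit 1 of ECX is set
                     if the component must be 64-byte aligned in the
                     compacted format.  */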
                  for (i = 2; i < 32; i++)
                    {
                      if ((STATE_SAVE_MASK & (1 << i)) != 0)
                        {
                          __cpuid_count (0xd, i, eax, ebx, ecx, edx);
                          xstate_comp_sizes[i] = eax;
                        }
                      else
                        {
                          ecx = 0;
                          xstate_comp_sizes[i] = 0;
                        }

                      if (i > 2)
                        {
                          xstate_comp_offsets[i]
                            = (xstate_comp_offsets[i - 1]
                               + xstate_comp_sizes[i - 1]);
                          if ((ecx & (1 << 1)) != 0)
                            xstate_comp_offsets[i]
                              = ALIGN_UP (xstate_comp_offsets[i], 64);
                        }
                    }

                  /* Use XSAVEC.  */
                  unsigned int size
                    = xstate_comp_offsets[31] + xstate_comp_sizes[31];
                  if (size)
                    {
                      cpu_features->xsave_state_size
                        = ALIGN_UP (size + STATE_SAVE_OFFSET, 64);
                      cpu_features->feature[index_arch_XSAVEC_Usable]
                        |= bit_arch_XSAVEC_Usable;
                    }
                }
            }
        }
    }
}

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  enum cpu_features_kind kind;

#if !HAS_CPUID
  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }
#endif

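  /* CPUID leaf 0 returns the highest supported standard leaf in EAX and
     the 12-byte vendor identification string in EBX, EDX and ECX.  */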
  __cpuid (0, cpu_features->max_cpuid, ebx, ecx, edx);

  /* This spells out "GenuineIntel".  */
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_intel;

      get_common_indeces (cpu_features, &family, &model, &extended_model,
                          &stepping);

      if (family == 0x06)
        {
          model += extended_model;
          switch (model)
            {
            case 0x1c:
            case 0x26:
              /* BSF is slow on Atom.  */
              cpu_features->feature[index_arch_Slow_BSF]
                |= bit_arch_Slow_BSF;
              break;

            case 0x57:
              /* Knights Landing.  Enable Silvermont optimizations.  */

            case 0x5c:
            case 0x5f:
              /* Unaligned load versions are faster than SSSE3
                 on Goldmont.  */

            case 0x4c:
              /* Airmont is a die shrink of Silvermont.  */

            case 0x37:
            case 0x4a:
            case 0x4d:
            case 0x5a:
            case 0x5d:
              /* Unaligned load versions are faster than SSSE3
                 on Silvermont.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
# error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            default:
              /* Unknown family 0x06 processors.  Assume this is one of
                 the Core i3/i5/i7 processors if AVX is available.  */
              if (!CPU_FEATURES_CPU_P (cpu_features, AVX))
                break;

            case 0x1a:
            case 0x1e:
            case 0x1f:
            case 0x25:
            case 0x2c:
            case 0x2e:
            case 0x2f:
              /* Rep string instructions, unaligned load, unaligned copy,
                 and pminub are fast on Intel Core i3, i5 and i7.  */
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
#endif
#if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop);
              break;

            case 0x3f:
              /* Xeon E7 v3 with stepping >= 4 has working TSX.  */
              if (stepping >= 4)
                break;
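              /* Otherwise fall through and disable TSX like on the
                 other Haswell parts below.  */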
            case 0x3c:
            case 0x45:
            case 0x46:
              /* Disable Intel TSX on Haswell processors (except Xeon E7 v3
                 with stepping >= 4) to avoid TSX on kernels that weren't
                 updated with the latest microcode package (which disables
                 the broken feature by default).  */
              cpu_features->cpuid[index_cpu_RTM].reg_RTM &= ~bit_cpu_RTM;
              break;
            }
        }

      /* Unaligned loads with 256-bit AVX registers are faster on
         Intel processors with AVX2.  */
      if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
        cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
          |= bit_arch_AVX_Fast_Unaligned_Load;

      /* Since AVX512ER is unique to Xeon Phi, set Prefer_No_VZEROUPPER
         if AVX512ER is available.  Otherwise set Prefer_No_AVX512, to
         avoid the lower CPU frequency that AVX512 causes.  */
      if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
        cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
          |= bit_arch_Prefer_No_VZEROUPPER;
      else
        cpu_features->feature[index_arch_Prefer_No_AVX512]
          |= bit_arch_Prefer_No_AVX512;
    }
  /* This spells out "AuthenticAMD".  */
  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
    {
      unsigned int extended_model, stepping;

      kind = arch_kind_amd;

      get_common_indeces (cpu_features, &family, &model, &extended_model,
                          &stepping);

      ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;

      unsigned int eax;
      __cpuid (0x80000000, eax, ebx, ecx, edx);
      if (eax >= 0x80000001)
        __cpuid (0x80000001,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].eax,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ebx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ecx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].edx);

      if (HAS_ARCH_FEATURE (AVX_Usable))
        {
          /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
             FMA4 requires AVX, determine if FMA4 is usable here.  */
          if (CPU_FEATURES_CPU_P (cpu_features, FMA4))
            cpu_features->feature[index_arch_FMA4_Usable]
              |= bit_arch_FMA4_Usable;
        }

      if (family == 0x15)
        {
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
#endif
          /* "Excavator"  */
          if (model >= 0x60 && model <= 0x7f)
            cpu_features->feature[index_arch_Fast_Unaligned_Load]
              |= (bit_arch_Fast_Unaligned_Load
                  | bit_arch_Fast_Copy_Backward);
        }
    }
  else
    {
      kind = arch_kind_other;
      get_common_indeces (cpu_features, NULL, NULL, NULL, NULL);
    }

  /* Support i586 if CX8 is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->feature[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->feature[index_arch_I686] |= bit_arch_I686;

#if !HAS_CPUID
no_cpuid:
#endif

  cpu_features->family = family;
  cpu_features->model = model;
  cpu_features->kind = kind;

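  /* Let GLIBC_TUNABLES adjust the selected CPU features
     (glibc.tune.hwcaps) and override the cache geometry parameters used
     to tune the memory and string functions.  */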
#if HAVE_TUNABLES
  TUNABLE_GET (hwcaps, tunable_val_t *, TUNABLE_CALLBACK (set_hwcaps));
  cpu_features->non_temporal_threshold
    = TUNABLE_GET (x86_non_temporal_threshold, long int, NULL);
  cpu_features->data_cache_size
    = TUNABLE_GET (x86_data_cache_size, long int, NULL);
  cpu_features->shared_cache_size
    = TUNABLE_GET (x86_shared_cache_size, long int, NULL);
#endif

  /* Reuse dl_platform, dl_hwcap and dl_hwcap_mask for x86.  */
#if !HAVE_TUNABLES && defined SHARED
  /* When tunables are enabled, the glibc.tune.hwcap_mask tunable has
     already initialized dl_hwcap_mask, so this is only needed
     otherwise.  */
  GLRO(dl_hwcap_mask) = HWCAP_IMPORTANT;
#endif

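  /* dl_platform and dl_hwcap feed the dynamic loader's platform and
     hwcap handling, e.g. the selection of optimized library search
     directories.  */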
#ifdef __x86_64__
  GLRO(dl_hwcap) = HWCAP_X86_64;
  if (cpu_features->kind == arch_kind_intel)
    {
      const char *platform = NULL;

      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
          && CPU_FEATURES_CPU_P (cpu_features, AVX512CD))
        {
          if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
            {
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512PF))
                platform = "xeon_phi";
            }
          else
            {
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512BW)
                  && CPU_FEATURES_CPU_P (cpu_features, AVX512DQ)
                  && CPU_FEATURES_CPU_P (cpu_features, AVX512VL))
                GLRO(dl_hwcap) |= HWCAP_X86_AVX512_1;
            }
        }

      if (platform == NULL
          && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
          && CPU_FEATURES_ARCH_P (cpu_features, FMA_Usable)
          && CPU_FEATURES_CPU_P (cpu_features, BMI1)
          && CPU_FEATURES_CPU_P (cpu_features, BMI2)
          && CPU_FEATURES_CPU_P (cpu_features, LZCNT)
          && CPU_FEATURES_CPU_P (cpu_features, MOVBE)
          && CPU_FEATURES_CPU_P (cpu_features, POPCNT))
        platform = "haswell";

      if (platform != NULL)
        GLRO(dl_platform) = platform;
    }
#else
  GLRO(dl_hwcap) = 0;
  if (CPU_FEATURES_CPU_P (cpu_features, SSE2))
    GLRO(dl_hwcap) |= HWCAP_X86_SSE2;

  if (CPU_FEATURES_ARCH_P (cpu_features, I686))
    GLRO(dl_platform) = "i686";
  else if (CPU_FEATURES_ARCH_P (cpu_features, I586))
    GLRO(dl_platform) = "i586";
#endif
}