sysdeps/x86/cpu-features.c
/* Initialize CPU feature data.
   This file is part of the GNU C Library.
   Copyright (C) 2008-2016 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <cpuid.h>
#include <cpu-features.h>

static void
get_common_indeces (struct cpu_features *cpu_features,
                    unsigned int *family, unsigned int *model,
                    unsigned int *extended_model)
{
  if (family)
    {
      unsigned int eax;
      __cpuid (1, eax, cpu_features->cpuid[COMMON_CPUID_INDEX_1].ebx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx,
               cpu_features->cpuid[COMMON_CPUID_INDEX_1].edx);
      cpu_features->cpuid[COMMON_CPUID_INDEX_1].eax = eax;
      *family = (eax >> 8) & 0x0f;
      *model = (eax >> 4) & 0x0f;
      *extended_model = (eax >> 12) & 0xf0;
      if (*family == 0x0f)
        {
          *family += (eax >> 20) & 0xff;
          *model += *extended_model;
        }
    }
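
  /* Worked example (added for illustration; the value is hypothetical):
     with a CPUID.1 EAX signature of 0x000306c3, the code above yields
     family = (eax >> 8) & 0x0f = 0x06, model = (eax >> 4) & 0x0f = 0x0c
     and extended_model = (eax >> 12) & 0xf0 = 0x30.  Because the base
     family is not 0x0f, the extended fields are not folded in here; for
     Intel family 0x06 the caller adds extended_model, giving model 0x3c.  */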

  if (cpu_features->max_cpuid >= 7)
    __cpuid_count (7, 0,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].eax,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ebx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].ecx,
                   cpu_features->cpuid[COMMON_CPUID_INDEX_7].edx);
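
  /* Note (added for illustration): the structured extended feature flags
     read here are what CPU_FEATURES_CPU_P tests later; for instance AVX2
     is CPUID.(EAX=7,ECX=0):EBX bit 5 and AVX512F/AVX512DQ are EBX bits 16
     and 17 of the same leaf.  */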

  /* Can we call xgetbv?  */
  if (CPU_FEATURES_CPU_P (cpu_features, OSXSAVE))
    {
      unsigned int xcrlow;
      unsigned int xcrhigh;
      asm ("xgetbv" : "=a" (xcrlow), "=d" (xcrhigh) : "c" (0));
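      /* Note (added for illustration): XGETBV with ECX == 0 returns the
         XCR0 register, whose low bits describe which register states the
         OS saves and restores: bit 1 is SSE (XMM), bit 2 is AVX (YMM high
         halves), and bits 5-7 are the AVX-512 opmask, ZMM0-ZMM15 high
         halves and ZMM16-ZMM31 states checked below.  */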
      /* Is YMM and XMM state usable?  */
      if ((xcrlow & (bit_YMM_state | bit_XMM_state)) ==
          (bit_YMM_state | bit_XMM_state))
        {
          /* Determine if AVX is usable.  */
          if (CPU_FEATURES_CPU_P (cpu_features, AVX))
            {
              cpu_features->feature[index_arch_AVX_Usable]
                |= bit_arch_AVX_Usable;
              /* The following features depend on AVX being usable.  */
              /* Determine if AVX2 is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX2))
                cpu_features->feature[index_arch_AVX2_Usable]
                  |= bit_arch_AVX2_Usable;
              /* Determine if FMA is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, FMA))
                cpu_features->feature[index_arch_FMA_Usable]
                  |= bit_arch_FMA_Usable;
            }

          /* Check if OPMASK state, upper 256-bit of ZMM0-ZMM15 and
             ZMM16-ZMM31 state are enabled.  */
          if ((xcrlow & (bit_Opmask_state | bit_ZMM0_15_state
                         | bit_ZMM16_31_state)) ==
              (bit_Opmask_state | bit_ZMM0_15_state | bit_ZMM16_31_state))
            {
              /* Determine if AVX512F is usable.  */
              if (CPU_FEATURES_CPU_P (cpu_features, AVX512F))
                {
                  cpu_features->feature[index_arch_AVX512F_Usable]
                    |= bit_arch_AVX512F_Usable;
                  /* Determine if AVX512DQ is usable.  */
                  if (CPU_FEATURES_CPU_P (cpu_features, AVX512DQ))
                    cpu_features->feature[index_arch_AVX512DQ_Usable]
                      |= bit_arch_AVX512DQ_Usable;
                }
            }
        }
    }
}

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  unsigned int ebx, ecx, edx;
  unsigned int family = 0;
  unsigned int model = 0;
  enum cpu_features_kind kind;

#if !HAS_CPUID
  if (__get_cpuid_max (0, 0) == 0)
    {
      kind = arch_kind_other;
      goto no_cpuid;
    }
#endif

  __cpuid (0, cpu_features->max_cpuid, ebx, ecx, edx);
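
  /* Note (added for illustration): CPUID leaf 0 returns the 12-byte vendor
     string in EBX, EDX, ECX (in that order), four little-endian bytes per
     register.  "GenuineIntel" therefore appears as EBX = 0x756e6547
     ("Genu"), EDX = 0x49656e69 ("ineI") and ECX = 0x6c65746e ("ntel"),
     which is what the comparisons below test.  */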

  /* This spells out "GenuineIntel".  */
  if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
    {
      unsigned int extended_model;

      kind = arch_kind_intel;

      get_common_indeces (cpu_features, &family, &model, &extended_model);

      if (family == 0x06)
        {
          ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;
          model += extended_model;
          switch (model)
            {
            case 0x1c:
            case 0x26:
              /* BSF is slow on Atom.  */
              cpu_features->feature[index_arch_Slow_BSF]
                |= bit_arch_Slow_BSF;
              break;

            case 0x57:
              /* Knights Landing.  Enable Silvermont optimizations.  */
              cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
                |= bit_arch_Prefer_No_VZEROUPPER;
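              /* No break here: Knights Landing and the Goldmont/Airmont
                 cases below intentionally fall through to the shared
                 Silvermont handling (clarifying comment added; it is not
                 in the original source).  */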

            case 0x5c:
            case 0x5f:
              /* Unaligned load versions are faster than SSSE3
                 on Goldmont.  */

            case 0x4c:
              /* Airmont is a die shrink of Silvermont.  */

            case 0x37:
            case 0x4a:
            case 0x4d:
            case 0x5a:
            case 0x5d:
              /* Unaligned load versions are faster than SSSE3
                 on Silvermont.  */
#if index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Unaligned_Load != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
# error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
#endif
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Unaligned_Load]
                |= (bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop
                    | bit_arch_Slow_SSE4_2);
              break;

            default:
              /* Unknown family 0x06 processors.  Assume this is one of
                 the Core i3/i5/i7 processors if AVX is available.  */
              if ((ecx & bit_cpu_AVX) == 0)
                break;

            case 0x1a:
            case 0x1e:
            case 0x1f:
            case 0x25:
            case 0x2c:
            case 0x2e:
            case 0x2f:
              /* Rep string instructions, unaligned load, unaligned copy,
                 and pminub are fast on Intel Core i3, i5 and i7.  */
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Load
#endif
#if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
# error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
#endif
#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
#endif
              cpu_features->feature[index_arch_Fast_Rep_String]
                |= (bit_arch_Fast_Rep_String
                    | bit_arch_Fast_Unaligned_Load
                    | bit_arch_Fast_Unaligned_Copy
                    | bit_arch_Prefer_PMINUB_for_stringop);
              break;
            }
        }

      /* Unaligned loads with 256-bit AVX registers are faster on
         Intel processors with AVX2.  */
      if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
        cpu_features->feature[index_arch_AVX_Fast_Unaligned_Load]
          |= bit_arch_AVX_Fast_Unaligned_Load;

      /* To avoid SSE transition penalty, use _dl_runtime_resolve_slow.
         If XGETBV supports ECX == 1, use _dl_runtime_resolve_opt.  */
      cpu_features->feature[index_arch_Use_dl_runtime_resolve_slow]
        |= bit_arch_Use_dl_runtime_resolve_slow;
      if (cpu_features->max_cpuid >= 0xd)
        {
          unsigned int eax;

          __cpuid_count (0xd, 1, eax, ebx, ecx, edx);
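          /* Note (added for clarity): bit 2 of EAX from
             CPUID.(EAX=0xd, ECX=1) indicates support for XGETBV with
             ECX == 1, which is what the check below tests.  */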
          if ((eax & (1 << 2)) != 0)
            cpu_features->feature[index_arch_Use_dl_runtime_resolve_opt]
              |= bit_arch_Use_dl_runtime_resolve_opt;
        }
    }
  /* This spells out "AuthenticAMD".  */
  else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
    {
      unsigned int extended_model;

      kind = arch_kind_amd;

      get_common_indeces (cpu_features, &family, &model, &extended_model);

      ecx = cpu_features->cpuid[COMMON_CPUID_INDEX_1].ecx;

      unsigned int eax;
      __cpuid (0x80000000, eax, ebx, ecx, edx);
      if (eax >= 0x80000001)
        __cpuid (0x80000001,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].eax,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ebx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].ecx,
                 cpu_features->cpuid[COMMON_CPUID_INDEX_80000001].edx);

      if (HAS_ARCH_FEATURE (AVX_Usable))
        {
          /* Since the FMA4 bit is in COMMON_CPUID_INDEX_80000001 and
             FMA4 requires AVX, determine if FMA4 is usable here.  */
          if (CPU_FEATURES_CPU_P (cpu_features, FMA4))
            cpu_features->feature[index_arch_FMA4_Usable]
              |= bit_arch_FMA4_Usable;
        }

      if (family == 0x15)
        {
#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
#endif
          /* "Excavator"   */
          if (model >= 0x60 && model <= 0x7f)
            cpu_features->feature[index_arch_Fast_Unaligned_Load]
              |= (bit_arch_Fast_Unaligned_Load
                  | bit_arch_Fast_Copy_Backward);
        }
    }
  else
    {
      kind = arch_kind_other;
      get_common_indeces (cpu_features, NULL, NULL, NULL);
    }

  /* Support i586 if CX8 is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CX8))
    cpu_features->feature[index_arch_I586] |= bit_arch_I586;

  /* Support i686 if CMOV is available.  */
  if (CPU_FEATURES_CPU_P (cpu_features, CMOV))
    cpu_features->feature[index_arch_I686] |= bit_arch_I686;

#if !HAS_CPUID
no_cpuid:
#endif

  cpu_features->family = family;
  cpu_features->model = model;
  cpu_features->kind = kind;
}
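
/* Illustrative sketch (added for illustration; not part of this file or of
   the glibc API): the same vendor and signature decoding can be reproduced
   in a standalone program using GCC's <cpuid.h>.  The sample mirrors the
   logic of get_common_indeces above; names and output format are
   hypothetical.

     #include <cpuid.h>
     #include <stdio.h>

     int
     main (void)
     {
       unsigned int eax, ebx, ecx, edx;

       // Leaf 0: maximum leaf and the 12-byte vendor string in EBX/EDX/ECX.
       if (!__get_cpuid (0, &eax, &ebx, &ecx, &edx))
         return 1;
       printf ("vendor: %.4s%.4s%.4s\n",
               (char *) &ebx, (char *) &edx, (char *) &ecx);

       // Leaf 1: family/model signature, decoded as in get_common_indeces.
       if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
         return 1;
       unsigned int family = (eax >> 8) & 0x0f;
       unsigned int model = (eax >> 4) & 0x0f;
       unsigned int extended_model = (eax >> 12) & 0xf0;
       if (family == 0x0f)
         {
           family += (eax >> 20) & 0xff;
           model += extended_model;
         }
       else if (family == 0x06)
         model += extended_model;
       printf ("family 0x%x model 0x%x\n", family, model);
       return 0;
     }  */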