1 /* Function cos vectorized with AVX-512, KNL and SKX versions.
2 Copyright (C) 2014-2023 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
20 #include "svml_d_trig_data.h"
21 #include "svml_d_wrapper_impl.h"
23 .section .text.evex512, "ax", @progbits
/*
   _ZGVeN8v_cos_knl -- 8-lane double-precision vectorized cos, AVX-512 (KNL).
   Vector ABI: %zmm0 = x[0..7] on entry; result cos(x)[0..7] is produced in
   %zmm1 on the fast path (the move back to %zmm0 is among the dropped lines,
   see below).  %rax = base of the __svml_d_trig_data constant table.

   NOTE(review): the embedded original line numbers in this extraction are
   NOT contiguous -- whole source lines were dropped.  Judging by the gaps:
   the `pushq %rbp` / `movq %rsp, %rbp` / stack-alignment prologue that the
   cfi_* directives below describe, the mask test and conditional branch
   separating the fast path from the special-argument path, all local labels,
   the scalar `call cos@PLT` fallback loop, and the final `ret` are absent.
   This text cannot assemble as-is; restore the missing lines from the
   upstream source.  Comments below describe only what is visible.  */
24 ENTRY (_ZGVeN8v_cos_knl)
/* (orphaned block comment -- delimiters restored by review)
26 ALGORITHM DESCRIPTION:
28 ( low accuracy ( < 4ulp ) or enhanced performance
29 ( half of correct mantissa ) implementation )
31 Argument representation:
32 arg + Pi/2 = (N*Pi + R)
35 cos(arg) = sin(arg+Pi/2) = sin(N*Pi + R) = (-1)^N * sin(R)
36 sin(R) is approximated by corresponding polynomial */
/* NOTE(review): these cfi_* directives annotate a `pushq %rbp` /
   `movq %rsp, %rbp` frame setup whose instructions were dropped here.  */
39 cfi_adjust_cfa_offset (8)
40 cfi_rel_offset (%rbp, 0)
42 cfi_def_cfa_register (%rbp)
/* Load constant-table base PIC-style via the GOT.  */
45 movq __svml_d_trig_data@GOTPCREL(%rip), %rax
50 /* Check for large arguments path */
/* (orphaned block comment -- delimiters restored by review)
54 ARGUMENT RANGE REDUCTION:
55 Add Pi/2 to argument: X' = X+Pi/2 */
57 vaddpd __dHalfPI(%rax), %zmm0, %zmm5
58 vmovups __dInvPI(%rax), %zmm3
60 /* Get absolute argument value: X' = |X'| */
61 vpandq __dAbsMask(%rax), %zmm5, %zmm1
63 /* Y = X'*InvPi + RS : right shifter add */
64 vfmadd213pd __dRShifter(%rax), %zmm3, %zmm5
65 vmovups __dPI1_FMA(%rax), %zmm6
67 /* N = Y - RS : right shifter sub */
68 vsubpd __dRShifter(%rax), %zmm5, %zmm4
70 /* SignRes = Y<<63 : shift LSB to MSB place for result sign */
71 vpsllq $63, %zmm5, %zmm12
72 vmovups __dC7(%rax), %zmm8
/* N' = N - 0.5: fold the Pi/2 argument shift back out of the multiple.  */
75 vsubpd __dOneHalf(%rax), %zmm4, %zmm10
/* k1 = lanes where |X'| is not-less-or-equal RangeVal (imm 22 = NLE_UQ):
   too-large or NaN arguments that need the special path.  */
76 vcmppd $22, __dRangeVal(%rax), %zmm1, %k1
/* Broadcast %rcx into the flagged lanes only, zeroing the rest; presumably
   %rcx holds -1 from a dropped earlier instruction -- TODO confirm against
   upstream.  */
77 vpbroadcastq %rcx, %zmm2{%k1}{z}
/* R = X' - N'*PI1 (head of the 3-part Pi split, FMA-accurate).
   NOTE(review): %zmm7 is consumed here but its initialization is among the
   dropped lines -- confirm against upstream.  */
78 vfnmadd231pd %zmm10, %zmm6, %zmm7
/* k0 = nonzero lanes of the special-argument mask.  */
79 vptestmq %zmm2, %zmm2, %k0
/* R = R - N'*PI2 (middle part of Pi).  */
82 vfnmadd231pd __dPI2_FMA(%rax), %zmm10, %zmm7
/* R = R - N'*PI3 (tail of Pi); %zmm10 now holds the reduced argument R.  */
87 vfnmadd132pd __dPI3_FMA(%rax), %zmm7, %zmm10
/* (orphaned block comment -- delimiters restored by review)
90 POLYNOMIAL APPROXIMATION: */
/* R2 = R*R; Horner evaluation in R2 follows (coefficients C7..C1).  */
93 vmulpd %zmm10, %zmm10, %zmm9
94 vfmadd213pd __dC6(%rax), %zmm9, %zmm8
95 vfmadd213pd __dC5(%rax), %zmm9, %zmm8
96 vfmadd213pd __dC4(%rax), %zmm9, %zmm8
98 /* Poly = C3+R2*(C4+R2*(C5+R2*(C6+R2*C7))) */
99 vfmadd213pd __dC3(%rax), %zmm9, %zmm8
101 /* Poly = R+R*(R2*(C1+R2*(C2+R2*Poly))) */
102 vfmadd213pd __dC2(%rax), %zmm9, %zmm8
103 vfmadd213pd __dC1(%rax), %zmm9, %zmm8
104 vmulpd %zmm9, %zmm8, %zmm11
105 vfmadd213pd %zmm10, %zmm10, %zmm11
/* (orphaned block comment -- delimiters restored by review)
109 Final sign setting: Res = Poly^SignRes */
/* XOR in the (-1)^N sign bit computed by the Y<<63 shift above.  */
111 vpxorq %zmm12, %zmm11, %zmm1
/* NOTE(review): the fast-path exit (result move to %zmm0, frame teardown,
   `ret`) and the `jne` into the code below are missing.  What follows is
   the special-argument spill/fill sequence surrounding a scalar cos()
   fallback loop whose body (including `call cos@PLT`) was also dropped.  */
119 cfi_def_cfa_register (%rsp)
121 cfi_adjust_cfa_offset (-8)
/* Spill original arguments and the vector results for the scalar loop.  */
127 vmovups %zmm0, 1152(%rsp)
128 vmovups %zmm1, 1216(%rsp)
/* Save mask regs %k4-%k7, vector regs %zmm16-%zmm31, and the GPRs the
   scalar loop uses, so the scalar libm call cannot clobber caller state.  */
132 kmovw %k4, 1048(%rsp)
134 kmovw %k5, 1040(%rsp)
135 kmovw %k6, 1032(%rsp)
136 kmovw %k7, 1024(%rsp)
137 vmovups %zmm16, 960(%rsp)
138 vmovups %zmm17, 896(%rsp)
139 vmovups %zmm18, 832(%rsp)
140 vmovups %zmm19, 768(%rsp)
141 vmovups %zmm20, 704(%rsp)
142 vmovups %zmm21, 640(%rsp)
143 vmovups %zmm22, 576(%rsp)
144 vmovups %zmm23, 512(%rsp)
145 vmovups %zmm24, 448(%rsp)
146 vmovups %zmm25, 384(%rsp)
147 vmovups %zmm26, 320(%rsp)
148 vmovups %zmm27, 256(%rsp)
149 vmovups %zmm28, 192(%rsp)
150 vmovups %zmm29, 128(%rsp)
151 vmovups %zmm30, 64(%rsp)
152 vmovups %zmm31, (%rsp)
153 movq %rsi, 1064(%rsp)
154 movq %rdi, 1056(%rsp)
155 movq %r12, 1096(%rsp)
156 cfi_offset_rel_rsp (12, 1096)
158 movq %r13, 1088(%rsp)
159 cfi_offset_rel_rsp (13, 1088)
161 movq %r14, 1080(%rsp)
162 cfi_offset_rel_rsp (14, 1080)
164 movq %r15, 1072(%rsp)
165 cfi_offset_rel_rsp (15, 1072)
/* NOTE(review): the scalar-loop control flow between save and restore
   (labels, bit tests on the lane mask, branches) was dropped here.
   Restore everything saved above.  */
183 kmovw 1048(%rsp), %k4
184 movq 1064(%rsp), %rsi
185 kmovw 1040(%rsp), %k5
186 movq 1056(%rsp), %rdi
187 kmovw 1032(%rsp), %k6
188 movq 1096(%rsp), %r12
190 movq 1088(%rsp), %r13
192 kmovw 1024(%rsp), %k7
193 vmovups 960(%rsp), %zmm16
194 vmovups 896(%rsp), %zmm17
195 vmovups 832(%rsp), %zmm18
196 vmovups 768(%rsp), %zmm19
197 vmovups 704(%rsp), %zmm20
198 vmovups 640(%rsp), %zmm21
199 vmovups 576(%rsp), %zmm22
200 vmovups 512(%rsp), %zmm23
201 vmovups 448(%rsp), %zmm24
202 vmovups 384(%rsp), %zmm25
203 vmovups 320(%rsp), %zmm26
204 vmovups 256(%rsp), %zmm27
205 vmovups 192(%rsp), %zmm28
206 vmovups 128(%rsp), %zmm29
207 vmovups 64(%rsp), %zmm30
208 vmovups (%rsp), %zmm31
209 movq 1080(%rsp), %r14
211 movq 1072(%rsp), %r15
/* Reload the (scalar-patched) result vector.  */
213 vmovups 1216(%rsp), %zmm1
/* Scalar fallback slots: load one saved input lane, (dropped:
   `call cos@PLT`), store the scalar result back into the result area.
   Two slots -- high (1160/1224) and low (1152/1216) double of a pair,
   indexed by %r15.  */
220 vmovsd 1160(%rsp,%r15), %xmm0
222 vmovsd %xmm0, 1224(%rsp,%r15)
228 vmovsd 1152(%rsp,%r15), %xmm0
230 vmovsd %xmm0, 1216(%rsp,%r15)
232 END (_ZGVeN8v_cos_knl)
/*
   _ZGVeN8v_cos_skx -- 8-lane double-precision vectorized cos, AVX-512 (SKX).
   Same algorithm as the KNL variant; differs in how the special-argument
   lane mask is materialized (vpternlogd/vpandnq NaN-pattern trick instead
   of KNL's zero-masked broadcast).  Vector ABI: %zmm0 = x[0..7] on entry;
   the result is produced in %zmm1 on the fast path.

   NOTE(review): as with the KNL variant, the embedded original line numbers
   are not contiguous -- the frame prologue the cfi_* directives refer to,
   the fast-path/special-path branch, all local labels, the scalar
   `call cos@PLT` fallback loop, and the final `ret` were dropped from this
   extraction.  Do not assemble as-is; restore from upstream.  */
234 ENTRY (_ZGVeN8v_cos_skx)
/* (orphaned block comment -- delimiters restored by review)
236 ALGORITHM DESCRIPTION:
238 ( low accuracy ( < 4ulp ) or enhanced performance
239 ( half of correct mantissa ) implementation )
241 Argument representation:
242 arg + Pi/2 = (N*Pi + R)
245 cos(arg) = sin(arg+Pi/2) = sin(N*Pi + R) = (-1)^N * sin(R)
246 sin(R) is approximated by corresponding polynomial */
/* NOTE(review): cfi_* directives for a dropped `pushq %rbp` /
   `movq %rsp, %rbp` prologue.  */
249 cfi_adjust_cfa_offset (8)
250 cfi_rel_offset (%rbp, 0)
252 cfi_def_cfa_register (%rbp)
/* Load constant-table base PIC-style via the GOT.  */
255 movq __svml_d_trig_data@GOTPCREL(%rip), %rax
260 /* Check for large arguments path */
/* %zmm2 = all-ones (ternary 0xff); as a double this bit pattern is a NaN,
   which the UNORD compare below uses to detect special lanes.  */
261 vpternlogd $0xff, %zmm2, %zmm2, %zmm2
/* (orphaned block comment -- delimiters restored by review)
264 ARGUMENT RANGE REDUCTION:
265 Add Pi/2 to argument: X' = X+Pi/2 */
267 vaddpd __dHalfPI(%rax), %zmm0, %zmm6
268 vmovups __dInvPI(%rax), %zmm3
269 vmovups __dRShifter(%rax), %zmm4
270 vmovups __dPI1_FMA(%rax), %zmm7
271 vmovups __dC7(%rax), %zmm9
273 /* Get absolute argument value: X' = |X'| */
274 vandpd __dAbsMask(%rax), %zmm6, %zmm1
276 /* Y = X'*InvPi + RS : right shifter add */
277 vfmadd213pd %zmm4, %zmm3, %zmm6
/* k1 = in-range lanes: |X'| <= RangeVal (imm 18 = LE_OS).  */
278 vcmppd $18, __dRangeVal(%rax), %zmm1, %k1
280 /* SignRes = Y<<63 : shift LSB to MSB place for result sign */
281 vpsllq $63, %zmm6, %zmm13
283 /* N = Y - RS : right shifter sub */
284 vsubpd %zmm4, %zmm6, %zmm5
/* N' = N - 0.5: fold the Pi/2 argument shift back out of the multiple.  */
287 vsubpd __dOneHalf(%rax), %zmm5, %zmm11
/* R = X' - N'*PI1 (head of the 3-part Pi split, FMA-accurate).
   NOTE(review): %zmm8 is consumed here but its initialization is among the
   dropped lines -- confirm against upstream.  */
288 vfnmadd231pd %zmm11, %zmm7, %zmm8
/* R = R - N'*PI2 (middle part of Pi).  */
291 vfnmadd231pd __dPI2_FMA(%rax), %zmm11, %zmm8
/* R = R - N'*PI3 (tail of Pi); %zmm11 now holds the reduced argument R.  */
294 vfnmadd132pd __dPI3_FMA(%rax), %zmm8, %zmm11
/* (orphaned block comment -- delimiters restored by review)
297 POLYNOMIAL APPROXIMATION: */
/* R2 = R*R; Horner evaluation in R2 follows (coefficients C7..C1).  */
300 vmulpd %zmm11, %zmm11, %zmm10
301 vfmadd213pd __dC6(%rax), %zmm10, %zmm9
302 vfmadd213pd __dC5(%rax), %zmm10, %zmm9
303 vfmadd213pd __dC4(%rax), %zmm10, %zmm9
305 /* Poly = C3+R2*(C4+R2*(C5+R2*(C6+R2*C7))) */
306 vfmadd213pd __dC3(%rax), %zmm10, %zmm9
308 /* Poly = R+R*(R2*(C1+R2*(C2+R2*Poly))) */
309 vfmadd213pd __dC2(%rax), %zmm10, %zmm9
310 vfmadd213pd __dC1(%rax), %zmm10, %zmm9
311 vmulpd %zmm10, %zmm9, %zmm12
312 vfmadd213pd %zmm11, %zmm11, %zmm12
/* Zero %zmm2 in the in-range (k1) lanes: x ANDN x == 0, merge-masked.
   Out-of-range lanes keep the all-ones (NaN) pattern set above.  */
313 vpandnq %zmm1, %zmm1, %zmm2{%k1}
/* k0 = lanes where %zmm2 is unordered with itself (imm 3 = UNORD_Q),
   i.e. the lanes still holding the NaN pattern = special-argument lanes.  */
314 vcmppd $3, %zmm2, %zmm2, %k0
/* (orphaned block comment -- delimiters restored by review)
318 Final sign setting: Res = Poly^SignRes */
/* XOR in the (-1)^N sign bit computed by the Y<<63 shift above.  */
320 vxorpd %zmm13, %zmm12, %zmm1
/* NOTE(review): the fast-path exit (result move to %zmm0, frame teardown,
   `ret`) and the branch into the code below are missing.  What follows is
   the special-argument spill/fill sequence surrounding a scalar cos()
   fallback loop whose body (including `call cos@PLT`) was also dropped.  */
329 cfi_def_cfa_register (%rsp)
331 cfi_adjust_cfa_offset (-8)
/* Spill original arguments and the vector results for the scalar loop.  */
337 vmovups %zmm0, 1152(%rsp)
338 vmovups %zmm1, 1216(%rsp)
/* Save mask regs %k4-%k7, vector regs %zmm16-%zmm31, and the GPRs the
   scalar loop uses, so the scalar libm call cannot clobber caller state.  */
343 kmovw %k4, 1048(%rsp)
344 kmovw %k5, 1040(%rsp)
345 kmovw %k6, 1032(%rsp)
346 kmovw %k7, 1024(%rsp)
347 vmovups %zmm16, 960(%rsp)
348 vmovups %zmm17, 896(%rsp)
349 vmovups %zmm18, 832(%rsp)
350 vmovups %zmm19, 768(%rsp)
351 vmovups %zmm20, 704(%rsp)
352 vmovups %zmm21, 640(%rsp)
353 vmovups %zmm22, 576(%rsp)
354 vmovups %zmm23, 512(%rsp)
355 vmovups %zmm24, 448(%rsp)
356 vmovups %zmm25, 384(%rsp)
357 vmovups %zmm26, 320(%rsp)
358 vmovups %zmm27, 256(%rsp)
359 vmovups %zmm28, 192(%rsp)
360 vmovups %zmm29, 128(%rsp)
361 vmovups %zmm30, 64(%rsp)
362 vmovups %zmm31, (%rsp)
363 movq %rsi, 1064(%rsp)
364 movq %rdi, 1056(%rsp)
365 movq %r12, 1096(%rsp)
366 cfi_offset_rel_rsp (12, 1096)
368 movq %r13, 1088(%rsp)
369 cfi_offset_rel_rsp (13, 1088)
371 movq %r14, 1080(%rsp)
372 cfi_offset_rel_rsp (14, 1080)
374 movq %r15, 1072(%rsp)
375 cfi_offset_rel_rsp (15, 1072)
/* NOTE(review): the scalar-loop control flow between save and restore
   (labels, bit tests on the lane mask, branches) was dropped here.
   Restore everything saved above.  */
393 kmovw 1048(%rsp), %k4
394 kmovw 1040(%rsp), %k5
395 kmovw 1032(%rsp), %k6
396 kmovw 1024(%rsp), %k7
397 vmovups 960(%rsp), %zmm16
398 vmovups 896(%rsp), %zmm17
399 vmovups 832(%rsp), %zmm18
400 vmovups 768(%rsp), %zmm19
401 vmovups 704(%rsp), %zmm20
402 vmovups 640(%rsp), %zmm21
403 vmovups 576(%rsp), %zmm22
404 vmovups 512(%rsp), %zmm23
405 vmovups 448(%rsp), %zmm24
406 vmovups 384(%rsp), %zmm25
407 vmovups 320(%rsp), %zmm26
408 vmovups 256(%rsp), %zmm27
409 vmovups 192(%rsp), %zmm28
410 vmovups 128(%rsp), %zmm29
411 vmovups 64(%rsp), %zmm30
412 vmovups (%rsp), %zmm31
/* Reload the (scalar-patched) result vector.  */
413 vmovups 1216(%rsp), %zmm1
414 movq 1064(%rsp), %rsi
415 movq 1056(%rsp), %rdi
416 movq 1096(%rsp), %r12
418 movq 1088(%rsp), %r13
420 movq 1080(%rsp), %r14
422 movq 1072(%rsp), %r15
/* Scalar fallback slots: load one saved input lane, (dropped:
   `call cos@PLT`), store the scalar result back.  The duplicated load
   of each slot matches the upstream pattern; the dropped line between
   the pair is presumably `vzeroupper` before the scalar call --
   TODO confirm against upstream.  */
430 vmovsd 1160(%rsp,%r15), %xmm0
432 vmovsd 1160(%rsp,%r15), %xmm0
436 vmovsd %xmm0, 1224(%rsp,%r15)
442 vmovsd 1152(%rsp,%r15), %xmm0
444 vmovsd 1152(%rsp,%r15), %xmm0
448 vmovsd %xmm0, 1216(%rsp,%r15)
450 END (_ZGVeN8v_cos_skx)