1 /* Function sincosf vectorized with AVX-512. KNL and SKX versions.
2 Copyright (C) 2014-2017 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
20 #include "svml_s_trig_data.h"
21 #include "svml_s_wrapper_impl.h"
/* ALGORITHM DESCRIPTION:
26 1) Range reduction to [-Pi/4; +Pi/4] interval
27 a) Grab sign from source argument and save it.
28 b) Remove sign using AND operation
29 c) Getting octant Y by 2/Pi multiplication
30 d) Add "Right Shifter" value
31 e) Treat obtained value as integer S for destination sign setting.
32 SS = ((S-S&1)&2)<<30; For sin part
33 SC = ((S+S&1)&2)<<30; For cos part
34 f) Change destination sign if source sign is negative
36 g) Subtract "Right Shifter" (0x4B000000) value
37 h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 4 parts:
38 X = X - Y*PI1 - Y*PI2 - Y*PI3 - Y*PI4;
39 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
40 a) Calculate X^2 = X * X
41 b) Calculate 2 polynomials for sin and cos:
42 RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
43 RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4))));
44 c) Swap RS & RC if first bit of obtained value after
45 Right Shifting is set to 1. Using And, Andnot & Or operations.
46 3) Destination sign setting
47 a) Set shifted destination sign using XOR operation:
49 R2 = XOR( RC, SC ). */
ENTRY (_ZGVeN16vl4l4_sincosf_knl)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
/* No AVX512DQ assembler support: fall back to the 8-wide AVX2 variant
   with the same vl4l4 ABI (two pointer arguments), invoked twice by
   the wrapper to cover all 16 lanes.  */
WRAPPER_IMPL_AVX512_fFF _ZGVdN8vl4l4_sincosf
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
cfi_def_cfa_register (%rbp)
/* %rax = base of the float trig constant table.  */
movq __svml_s_trig_data@GOTPCREL(%rip), %rax
vmovups __sAbsMask(%rax), %zmm0
vmovups __sInvPI(%rax), %zmm3
/* Absolute argument computation */
vpandd %zmm0, %zmm2, %zmm1
vmovups __sPI1_FMA(%rax), %zmm5
vmovups __sSignMask(%rax), %zmm9
/* zmm0 = sign bits of the input (argument AND-NOT AbsMask).  */
vpandnd %zmm2, %zmm0, %zmm0
/* h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 3 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3 */
/* c) Getting octant Y by 2/Pi multiplication
d) Add "Right Shifter" value */
vfmadd213ps __sRShifter(%rax), %zmm1, %zmm3
vmovups __sPI3_FMA(%rax), %zmm7
/* g) Subtract "Right Shifter" (0x4B000000) value */
vsubps __sRShifter(%rax), %zmm3, %zmm12
/* e) Treat obtained value as integer S for destination sign setting */
vpslld $31, %zmm3, %zmm13
vmovups __sA7_FMA(%rax), %zmm14
vfnmadd231ps %zmm12, %zmm5, %zmm6
/* 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
a) Calculate X^2 = X * X
b) Calculate 2 polynomials for sin and cos:
RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4)))) */
vmovaps %zmm14, %zmm15
vmovups __sA9_FMA(%rax), %zmm3
/* k1 = lanes whose |x| exceeds the range-reduction threshold
   (predicate 22 = NLE_UQ); those lanes need the scalar fallback.  */
vcmpps $22, __sRangeReductionVal(%rax), %zmm1, %k1
/* Materialize a per-lane special-case flag vector from %edx under k1
   (zero-masking), then test it to produce mask k0.  */
vpbroadcastd %edx, %zmm1{%k1}{z}
vfnmadd231ps __sPI2_FMA(%rax), %zmm12, %zmm6
vptestmd %zmm1, %zmm1, %k0
vpandd %zmm6, %zmm9, %zmm11
vpxord __sOneHalf(%rax), %zmm11, %zmm4
/* Result sign calculations */
vpternlogd $150, %zmm13, %zmm9, %zmm11
/* Add correction term 0.5 for cos() part */
vaddps %zmm4, %zmm12, %zmm10
vfnmadd213ps %zmm6, %zmm7, %zmm12
vfnmadd231ps %zmm10, %zmm5, %zmm8
/* Apply computed sign to the sin-path reduced argument.  */
vpxord %zmm13, %zmm12, %zmm13
vmulps %zmm13, %zmm13, %zmm12
vfnmadd231ps __sPI2_FMA(%rax), %zmm10, %zmm8
vfmadd231ps __sA9_FMA(%rax), %zmm12, %zmm15
vfnmadd213ps %zmm8, %zmm7, %zmm10
vfmadd213ps __sA5_FMA(%rax), %zmm12, %zmm15
vpxord %zmm11, %zmm10, %zmm5
vmulps %zmm5, %zmm5, %zmm4
vfmadd213ps __sA3(%rax), %zmm12, %zmm15
vfmadd213ps %zmm14, %zmm4, %zmm3
vmulps %zmm12, %zmm15, %zmm14
vfmadd213ps __sA5_FMA(%rax), %zmm4, %zmm3
vfmadd213ps %zmm13, %zmm13, %zmm14
vfmadd213ps __sA3(%rax), %zmm4, %zmm3
/* Re-attach the saved source sign to the sin result.  */
vpxord %zmm0, %zmm14, %zmm0
vmulps %zmm4, %zmm3, %zmm3
vfmadd213ps %zmm5, %zmm5, %zmm3
/* Store results: sin through %rdi, cos through %rsi (vl4l4 ABI).  */
vmovups %zmm0, (%rdi)
vmovups %zmm3, (%rsi)
cfi_def_cfa_register (%rsp)
cfi_adjust_cfa_offset (-8)
/* Special-case path: spill the original argument and the partial
   results so the scalar fallback can patch individual lanes.  */
vmovups %zmm2, 1152(%rsp)
vmovups %zmm0, 1216(%rsp)
vmovups %zmm3, 1280(%rsp)
/* Save opmask and zmm16-zmm31 registers around the libm calls.  */
kmovw %k4, 1048(%rsp)
kmovw %k5, 1040(%rsp)
kmovw %k6, 1032(%rsp)
kmovw %k7, 1024(%rsp)
vmovups %zmm16, 960(%rsp)
vmovups %zmm17, 896(%rsp)
vmovups %zmm18, 832(%rsp)
vmovups %zmm19, 768(%rsp)
vmovups %zmm20, 704(%rsp)
vmovups %zmm21, 640(%rsp)
vmovups %zmm22, 576(%rsp)
vmovups %zmm23, 512(%rsp)
vmovups %zmm24, 448(%rsp)
vmovups %zmm25, 384(%rsp)
vmovups %zmm26, 320(%rsp)
vmovups %zmm27, 256(%rsp)
vmovups %zmm28, 192(%rsp)
vmovups %zmm29, 128(%rsp)
vmovups %zmm30, 64(%rsp)
vmovups %zmm31, (%rsp)
/* Save callee-saved GPRs used by the lane-iteration loop.  */
movq %rsi, 1056(%rsp)
movq %r12, 1096(%rsp)
cfi_offset_rel_rsp (12, 1096)
movq %r13, 1088(%rsp)
cfi_offset_rel_rsp (13, 1088)
movq %r14, 1080(%rsp)
cfi_offset_rel_rsp (14, 1080)
movq %r15, 1072(%rsp)
cfi_offset_rel_rsp (15, 1072)
movq %rbx, 1064(%rsp)
/* Restore everything once all special lanes are patched.  */
kmovw 1048(%rsp), %k4
movq 1056(%rsp), %rsi
kmovw 1040(%rsp), %k5
movq 1096(%rsp), %r12
kmovw 1032(%rsp), %k6
movq 1088(%rsp), %r13
kmovw 1024(%rsp), %k7
vmovups 960(%rsp), %zmm16
vmovups 896(%rsp), %zmm17
vmovups 832(%rsp), %zmm18
vmovups 768(%rsp), %zmm19
vmovups 704(%rsp), %zmm20
vmovups 640(%rsp), %zmm21
vmovups 576(%rsp), %zmm22
vmovups 512(%rsp), %zmm23
vmovups 448(%rsp), %zmm24
vmovups 384(%rsp), %zmm25
vmovups 320(%rsp), %zmm26
vmovups 256(%rsp), %zmm27
vmovups 192(%rsp), %zmm28
vmovups 128(%rsp), %zmm29
vmovups 64(%rsp), %zmm30
vmovups (%rsp), %zmm31
movq 1080(%rsp), %r14
movq 1072(%rsp), %r15
movq 1064(%rsp), %rbx
vmovups 1216(%rsp), %zmm0
vmovups 1280(%rsp), %zmm3
/* Scalar fallback, odd lane of the current pair (offset +4 within the
   8-byte stride indexed by %r15): recompute sinf then cosf and patch
   the spilled result vectors in place.  */
vmovss 1156(%rsp,%r15,8), %xmm0
call JUMPTARGET(sinf)
vmovss %xmm0, 1220(%rsp,%r15,8)
vmovss 1156(%rsp,%r15,8), %xmm0
call JUMPTARGET(cosf)
vmovss %xmm0, 1284(%rsp,%r15,8)
/* Scalar fallback, even lane of the current pair.  */
vmovss 1152(%rsp,%r15,8), %xmm0
call JUMPTARGET(sinf)
vmovss %xmm0, 1216(%rsp,%r15,8)
vmovss 1152(%rsp,%r15,8), %xmm0
call JUMPTARGET(cosf)
vmovss %xmm0, 1280(%rsp,%r15,8)
END (_ZGVeN16vl4l4_sincosf_knl)
libmvec_hidden_def(_ZGVeN16vl4l4_sincosf_knl)
ENTRY (_ZGVeN16vl4l4_sincosf_skx)
#ifndef HAVE_AVX512DQ_ASM_SUPPORT
/* No AVX512DQ assembler support: fall back to the 8-wide AVX2 variant
   with the SAME vl4l4 ABI (sin/cos result pointers in %rdi/%rsi).
   BUGFIX: this previously wrapped _ZGVdN8vvv_sincosf, whose ABI
   passes result pointers in vector registers and is not
   call-compatible with this entry point; the KNL twin above correctly
   wraps _ZGVdN8vl4l4_sincosf.  */
WRAPPER_IMPL_AVX512_fFF _ZGVdN8vl4l4_sincosf
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
cfi_def_cfa_register (%rbp)
/* %rax = base of the float trig constant table.  */
movq __svml_s_trig_data@GOTPCREL(%rip), %rax
vmovups __sAbsMask(%rax), %zmm3
vmovups __sInvPI(%rax), %zmm5
vmovups __sRShifter(%rax), %zmm6
vmovups __sPI1_FMA(%rax), %zmm9
vmovups __sPI2_FMA(%rax), %zmm10
vmovups __sSignMask(%rax), %zmm14
vmovups __sOneHalf(%rax), %zmm7
vmovups __sPI3_FMA(%rax), %zmm12
/* Absolute argument computation */
vandps %zmm3, %zmm4, %zmm2
/* c) Getting octant Y by 2/Pi multiplication
d) Add "Right Shifter" value */
vfmadd213ps %zmm6, %zmm2, %zmm5
/* k1 = lanes with |x| within the fast range-reduction threshold
   (predicate 18 = LE_OQ).  */
vcmpps $18, __sRangeReductionVal(%rax), %zmm2, %k1
/* e) Treat obtained value as integer S for destination sign setting */
vpslld $31, %zmm5, %zmm0
/* g) Subtract "Right Shifter" (0x4B000000) value */
vsubps %zmm6, %zmm5, %zmm5
vmovups __sA3(%rax), %zmm6
/* h) Subtract Y*(PI/2) from X argument, where PI/2 divided to 3 parts:
X = X - Y*PI1 - Y*PI2 - Y*PI3 */
vmovaps %zmm2, %zmm11
vfnmadd231ps %zmm5, %zmm9, %zmm11
vfnmadd231ps %zmm5, %zmm10, %zmm11
vandps %zmm11, %zmm14, %zmm1
vxorps %zmm1, %zmm7, %zmm8
/* Result sign calculations */
vpternlogd $150, %zmm0, %zmm14, %zmm1
/* All-ones mask used below to flag lanes needing scalar fallback.  */
vmovups .L_2il0floatpacket.13(%rip), %zmm14
/* Add correction term 0.5 for cos() part */
vaddps %zmm8, %zmm5, %zmm15
vfnmadd213ps %zmm11, %zmm12, %zmm5
/* zmm11 = sign bits of the original argument.  */
vandnps %zmm4, %zmm3, %zmm11
vmovups __sA7_FMA(%rax), %zmm3
vmovaps %zmm2, %zmm13
vfnmadd231ps %zmm15, %zmm9, %zmm13
vxorps %zmm0, %zmm5, %zmm9
vmovups __sA5_FMA(%rax), %zmm0
vfnmadd231ps %zmm15, %zmm10, %zmm13
vmulps %zmm9, %zmm9, %zmm8
vfnmadd213ps %zmm13, %zmm12, %zmm15
vmovups __sA9_FMA(%rax), %zmm12
vxorps %zmm1, %zmm15, %zmm1
vmulps %zmm1, %zmm1, %zmm13
/* 2) Polynomial (minimax for sin within [-Pi/4; +Pi/4] interval)
a) Calculate X^2 = X * X
b) Calculate 2 polynomials for sin and cos:
RS = X * ( A0 + X^2 * (A1 + x^2 * (A2 + x^2 * (A3))));
RC = B0 + X^2 * (B1 + x^2 * (B2 + x^2 * (B3 + x^2 * (B4)))) */
vmovaps %zmm12, %zmm7
vfmadd213ps %zmm3, %zmm8, %zmm7
vfmadd213ps %zmm3, %zmm13, %zmm12
vfmadd213ps %zmm0, %zmm8, %zmm7
vfmadd213ps %zmm0, %zmm13, %zmm12
vfmadd213ps %zmm6, %zmm8, %zmm7
vfmadd213ps %zmm6, %zmm13, %zmm12
vmulps %zmm8, %zmm7, %zmm10
vmulps %zmm13, %zmm12, %zmm3
vfmadd213ps %zmm9, %zmm9, %zmm10
vfmadd213ps %zmm1, %zmm1, %zmm3
/* Re-attach the saved source sign to the sin result.  */
vxorps %zmm11, %zmm10, %zmm0
/* Clear (x ANDN x = 0) the in-range lanes of the all-ones vector under
   k1; surviving ones mark lanes needing the scalar fallback.  */
vpandnd %zmm2, %zmm2, %zmm14{%k1}
vptestmd %zmm14, %zmm14, %k0
/* Store results: sin through %rdi, cos through %rsi (vl4l4 ABI).  */
vmovups %zmm0, (%rdi)
vmovups %zmm3, (%rsi)
cfi_def_cfa_register (%rsp)
cfi_adjust_cfa_offset (-8)
/* Special-case path: spill the original argument and the partial
   results so the scalar fallback can patch individual lanes.  */
vmovups %zmm4, 1152(%rsp)
vmovups %zmm0, 1216(%rsp)
vmovups %zmm3, 1280(%rsp)
/* Save opmask and zmm16-zmm31 registers around the libm calls.  */
kmovw %k4, 1048(%rsp)
kmovw %k5, 1040(%rsp)
kmovw %k6, 1032(%rsp)
kmovw %k7, 1024(%rsp)
vmovups %zmm16, 960(%rsp)
vmovups %zmm17, 896(%rsp)
vmovups %zmm18, 832(%rsp)
vmovups %zmm19, 768(%rsp)
vmovups %zmm20, 704(%rsp)
vmovups %zmm21, 640(%rsp)
vmovups %zmm22, 576(%rsp)
vmovups %zmm23, 512(%rsp)
vmovups %zmm24, 448(%rsp)
vmovups %zmm25, 384(%rsp)
vmovups %zmm26, 320(%rsp)
vmovups %zmm27, 256(%rsp)
vmovups %zmm28, 192(%rsp)
vmovups %zmm29, 128(%rsp)
vmovups %zmm30, 64(%rsp)
vmovups %zmm31, (%rsp)
/* Save callee-saved GPRs used by the lane-iteration loop.  */
movq %rsi, 1056(%rsp)
movq %r12, 1096(%rsp)
cfi_offset_rel_rsp (12, 1096)
movq %r13, 1088(%rsp)
cfi_offset_rel_rsp (13, 1088)
movq %r14, 1080(%rsp)
cfi_offset_rel_rsp (14, 1080)
movq %r15, 1072(%rsp)
cfi_offset_rel_rsp (15, 1072)
movq %rbx, 1064(%rsp)
/* Restore everything once all special lanes are patched.  */
kmovw 1048(%rsp), %k4
kmovw 1040(%rsp), %k5
kmovw 1032(%rsp), %k6
kmovw 1024(%rsp), %k7
vmovups 960(%rsp), %zmm16
vmovups 896(%rsp), %zmm17
vmovups 832(%rsp), %zmm18
vmovups 768(%rsp), %zmm19
vmovups 704(%rsp), %zmm20
vmovups 640(%rsp), %zmm21
vmovups 576(%rsp), %zmm22
vmovups 512(%rsp), %zmm23
vmovups 448(%rsp), %zmm24
vmovups 384(%rsp), %zmm25
vmovups 320(%rsp), %zmm26
vmovups 256(%rsp), %zmm27
vmovups 192(%rsp), %zmm28
vmovups 128(%rsp), %zmm29
vmovups 64(%rsp), %zmm30
vmovups (%rsp), %zmm31
vmovups 1216(%rsp), %zmm0
vmovups 1280(%rsp), %zmm3
movq 1056(%rsp), %rsi
movq 1096(%rsp), %r12
movq 1088(%rsp), %r13
movq 1080(%rsp), %r14
movq 1072(%rsp), %r15
movq 1064(%rsp), %rbx
/* Scalar fallback, odd lane of the current pair (offset +4 within the
   8-byte stride indexed by %r15): recompute sinf then cosf and patch
   the spilled result vectors in place.  */
vmovss 1156(%rsp,%r15,8), %xmm0
vmovss 1156(%rsp,%r15,8), %xmm0
call JUMPTARGET(sinf)
vmovss %xmm0, 1220(%rsp,%r15,8)
vmovss 1156(%rsp,%r15,8), %xmm0
call JUMPTARGET(cosf)
vmovss %xmm0, 1284(%rsp,%r15,8)
/* Scalar fallback, even lane of the current pair.  */
vmovss 1152(%rsp,%r15,8), %xmm0
vmovss 1152(%rsp,%r15,8), %xmm0
call JUMPTARGET(sinf)
vmovss %xmm0, 1216(%rsp,%r15,8)
vmovss 1152(%rsp,%r15,8), %xmm0
call JUMPTARGET(cosf)
vmovss %xmm0, 1280(%rsp,%r15,8)
END (_ZGVeN16vl4l4_sincosf_skx)
libmvec_hidden_def(_ZGVeN16vl4l4_sincosf_skx)
503 /* Wrapper between vvv and vl4l4 vector variants. */
/* Wrapper from the vvv ABI (sin/cos result pointers packed in vector
   registers zmm1-zmm4) to the vl4l4 ABI (two array pointers): spill the
   pointer vectors, call \callee with stack buffers, then scatter each
   scalar result through its saved pointer.  */
.macro WRAPPER_AVX512_vvv_vl4l4 callee
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
cfi_def_cfa_register (%rbp)
/* Spill the packed result pointers (zmm1/zmm2 and zmm3/zmm4) to the
   stack buffer so they survive the callee.  */
vmovups %zmm1, 128(%rsp)
vmovups %zmm2, 192(%rdi)
vmovups %zmm3, 256(%rdi)
vmovups %zmm4, 320(%rdi)
call HIDDEN_JUMPTARGET(\callee)
/* Reload saved destination pointers for the scatter stores.  */
movl 104(%rsp), %r10d
movl 124(%rsp), %r11d
cfi_def_cfa_register (%rsp)
cfi_adjust_cfa_offset (-8)
.cfi_escape 0x10,0x6,0x2,0x76,0
.cfi_escape 0xf,0x3,0x76,0x78,0x6
/* 32-bit (%ebp) addressing below implies an ILP32/x32 code path where
   pointers are 32 bits wide — NOTE(review): the guarding #ifdef is not
   visible here; confirm against the full file.  */
leal -112(%rbp), %esi
leal -176(%rbp), %edi
vmovdqa64 %zmm1, -240(%ebp)
vmovdqa64 %zmm2, -304(%ebp)
call HIDDEN_JUMPTARGET(\callee)
/* Scatter: for each of the 16 lanes, load the saved 32-bit destination
   pointer into %eax and the computed sin value into %xmm0; the store
   through the pointer is performed by interleaved lines not visible in
   this view.  First the sin results (pointers at -240..-184).  */
movl -240(%ebp), %eax
vmovss -176(%ebp), %xmm0
movl -236(%ebp), %eax
vmovss -172(%ebp), %xmm0
movl -232(%ebp), %eax
vmovss -168(%ebp), %xmm0
movl -228(%ebp), %eax
vmovss -164(%ebp), %xmm0
movl -224(%ebp), %eax
vmovss -160(%ebp), %xmm0
movl -220(%ebp), %eax
vmovss -156(%ebp), %xmm0
movl -216(%ebp), %eax
vmovss -152(%ebp), %xmm0
movl -212(%ebp), %eax
vmovss -148(%ebp), %xmm0
movl -208(%ebp), %eax
vmovss -144(%ebp), %xmm0
movl -204(%ebp), %eax
vmovss -140(%ebp), %xmm0
movl -200(%ebp), %eax
vmovss -136(%ebp), %xmm0
movl -196(%ebp), %eax
vmovss -132(%ebp), %xmm0
movl -192(%ebp), %eax
vmovss -128(%ebp), %xmm0
movl -188(%ebp), %eax
vmovss -124(%ebp), %xmm0
movl -184(%ebp), %eax
vmovss -120(%ebp), %xmm0
movl -180(%ebp), %eax
vmovss -116(%ebp), %xmm0
/* Then the cos results (pointers at -304..-248).  */
movl -304(%ebp), %eax
vmovss -112(%ebp), %xmm0
movl -300(%ebp), %eax
vmovss -108(%ebp), %xmm0
movl -296(%ebp), %eax
vmovss -104(%ebp), %xmm0
movl -292(%ebp), %eax
vmovss -100(%ebp), %xmm0
movl -288(%ebp), %eax
vmovss -96(%ebp), %xmm0
movl -284(%ebp), %eax
vmovss -92(%ebp), %xmm0
movl -280(%ebp), %eax
vmovss -88(%ebp), %xmm0
movl -276(%ebp), %eax
vmovss -84(%ebp), %xmm0
movl -272(%ebp), %eax
vmovss -80(%ebp), %xmm0
movl -268(%ebp), %eax
vmovss -76(%ebp), %xmm0
movl -264(%ebp), %eax
vmovss -72(%ebp), %xmm0
movl -260(%ebp), %eax
vmovss -68(%ebp), %xmm0
movl -256(%ebp), %eax
vmovss -64(%ebp), %xmm0
movl -252(%ebp), %eax
vmovss -60(%ebp), %xmm0
movl -248(%ebp), %eax
vmovss -56(%ebp), %xmm0
movl -244(%ebp), %eax
vmovss -52(%ebp), %xmm0
/* vvv entry point (KNL): adapt vector-of-pointers arguments to the
   vl4l4 kernel above.  */
ENTRY (_ZGVeN16vvv_sincosf_knl)
WRAPPER_AVX512_vvv_vl4l4 _ZGVeN16vl4l4_sincosf_knl
END (_ZGVeN16vvv_sincosf_knl)
/* vvv entry point (SKX): adapt vector-of-pointers arguments to the
   vl4l4 kernel above.  */
ENTRY (_ZGVeN16vvv_sincosf_skx)
WRAPPER_AVX512_vvv_vl4l4 _ZGVeN16vl4l4_sincosf_skx
END (_ZGVeN16vvv_sincosf_skx)
.section .rodata, "a"
/* 16 x 0xffffffff: all-ones 32-bit lanes, used by the SKX kernel as the
   initial "needs special handling" flag vector (cleared per-lane for
   in-range arguments).  */
.L_2il0floatpacket.13:
.long 0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff,0xffffffff
.type .L_2il0floatpacket.13,@object