/* Function pow vectorized with AVX-512. KNL and SKX versions.
   Copyright (C) 2014-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include "svml_d_pow_data.h"
#include "svml_d_wrapper_impl.h"
/* ALGORITHM DESCRIPTION:

   1) Calculating log2|x|
      Here we use the following formula.
      Let |x|=2^k1*X1, where k1 is integer, 1<=X1<2.

      Rcp1 ~= 1/X1, X2=Rcp1*X1,
      Rcp2 ~= 1/X2, X3=Rcp2*X2,
      Rcp3 ~= 1/X3, Rcp3C ~= C/X3.

      log2|x| = k1 + log2(1/Rcp1) + log2(1/Rcp2) + log2(C/Rcp3C) +
                log2(X1*Rcp1*Rcp2*Rcp3C/C),
      where X1*Rcp1*Rcp2*Rcp3C = C*(1+q), q is very small.

      The values of Rcp1, log2(1/Rcp1), Rcp2, log2(1/Rcp2),
      Rcp3C, log2(C/Rcp3C) are taken from tables.
      Values of Rcp1, Rcp2, Rcp3C are such that RcpC=Rcp1*Rcp2*Rcp3C
      is exactly represented in target precision.

      log2(X1*Rcp1*Rcp2*Rcp3C/C) = log2(1+q) = ln(1+q)/ln2 =
      = 1/(ln2)*q - 1/(2ln2)*q^2 + 1/(3ln2)*q^3 - ... =
      = 1/(C*ln2)*cq - 1/(2*C^2*ln2)*cq^2 + 1/(3*C^3*ln2)*cq^3 - ... =
      = (1 + a1)*cq + a2*cq^2 + a3*cq^3 + ...,
      where cq = X1*Rcp1*Rcp2*Rcp3C-C,
      a1=1/(C*ln(2))-1 is small,

      We get 3 parts of log2 result: HH+HL+HLL ~= log2|x|.

   2) Calculation of y*(HH+HL+HLL).
      Get high PH and medium PL parts of y*log2|x|.
      Get low PLL part of y*log2|x|.
      Now we have PH+PL+PLL ~= y*log2|x|.

   3) Calculation of 2^(PH+PL+PLL).
      Mathematical idea of computing 2^(PH+PL+PLL) is the following.
      Let's represent PH+PL+PLL in the form N + j/2^expK + Z,
      where expK=7 in this implementation, N and j are integers,
      0<=j<=2^expK-1, |Z|<2^(-expK-1).
      Hence 2^(PH+PL+PLL) ~= 2^N * 2^(j/2^expK) * 2^Z,
      where 2^(j/2^expK) is stored in a table, and
      2^Z ~= 1 + B1*Z + B2*Z^2 ... + B5*Z^5.

      We compute 2^(PH+PL+PLL) as follows.
      Break PH into PHH + PHL, where PHH = N + j/2^expK.
      Exp2Poly = B1*Z + B2*Z^2 ... + B5*Z^5
      Get 2^(j/2^expK) from table in the form THI+TLO.
      Now we have 2^(PH+PL+PLL) ~= 2^N * (THI + TLO) * (1 + Exp2Poly).

      Get significand of 2^(PH+PL+PLL) in the form ResHi+ResLo:
      ResLo := THI * Exp2Poly + TLO

      Get exponent ERes of the result:
      Result := ex(Res) + N.  */
83 .section .text.evex512, "ax", @progbits
84 ENTRY (_ZGVeN8vv_pow_knl)
86 cfi_adjust_cfa_offset (8)
87 cfi_rel_offset (%rbp, 0)
89 cfi_def_cfa_register (%rbp)
92 vpsrlq $32, %zmm0, %zmm13
94 movq __svml_dpow_data@GOTPCREL(%rip), %rax
96 vpmovqd %zmm13, %ymm10
97 vpsrlq $32, %zmm12, %zmm14
100 vpmovqd %zmm14, %ymm15
102 /* x1 = x; Hi(x1) = (Hi(x1)&0x000fffff)|0x3ff00000 */
103 vmovups _dbOne(%rax), %zmm6
105 /* i = (((Hi(x) & 0x000ffe00) + 0x00000200) >> 10); -> i = (b1..b11 + 1) / 2 */
106 vmovaps %zmm10, %zmm5
108 /* k = Hi(x); k = k - 0x3fe7fe00; k = k >> 20 */
109 vpsubd _i3fe7fe00(%rax), %zmm10, %zmm14{%k1}
110 vpandd _iIndexMask(%rax), %zmm10, %zmm5{%k1}
111 vpsrad $20, %zmm14, %zmm14{%k1}
112 vpxord %zmm9, %zmm9, %zmm9
113 vpaddd _HIDELTA(%rax), %zmm10, %zmm3{%k1}
114 vpaddd _iIndexAdd(%rax), %zmm5, %zmm5{%k1}
115 vpxord %zmm7, %zmm7, %zmm7
116 vpaddd _i2p20_2p19(%rax), %zmm14, %zmm14{%k1}
117 vpcmpd $1, _LORANGE(%rax), %zmm3, %k2{%k1}
118 vpsrld $10, %zmm5, %zmm5{%k1}
119 vpandd _ABSMASK(%rax), %zmm15, %zmm2{%k1}
120 vpbroadcastd %ecx, %zmm1{%k2}{z}
122 /* Index for reciprocal table */
123 vpslld $3, %zmm5, %zmm8{%k1}
125 vgatherdpd 11712(%rax,%ymm8), %zmm9{%k2}
126 vpmovzxdq %ymm14, %zmm10
128 /* Index for log2 table */
129 vpslld $4, %zmm5, %zmm13{%k1}
131 vpsllq $32, %zmm10, %zmm3
132 vpxord %zmm8, %zmm8, %zmm8
133 vpcmpd $5, _INF(%rax), %zmm2, %k3{%k1}
134 vpbroadcastd %ecx, %zmm4{%k3}{z}
135 vpternlogq $248, _iMantissaMask(%rax), %zmm0, %zmm6
137 vpternlogq $168, _iffffffff00000000(%rax), %zmm10, %zmm3
139 /* x1Hi=x1; Lo(x1Hi)&=0xf8000000; x1Lo = x1-x1Hi */
140 vpandq _iHighMask(%rax), %zmm6, %zmm2
141 vgatherdpd 19976(%rax,%ymm13), %zmm8{%k2}
142 vpord %zmm4, %zmm1, %zmm11{%k1}
143 vsubpd _db2p20_2p19(%rax), %zmm3, %zmm1
144 vsubpd %zmm2, %zmm6, %zmm5
147 vmulpd %zmm9, %zmm6, %zmm6
148 vgatherdpd 19968(%rax,%ymm13), %zmm7{%k3}
151 vaddpd _LHN(%rax), %zmm6, %zmm4
153 /* E = -r1+__fence(x1Hi*rcp1) */
154 vfmsub213pd %zmm6, %zmm9, %zmm2
157 vaddpd %zmm7, %zmm1, %zmm7
160 vfmadd213pd %zmm2, %zmm9, %zmm5
163 vaddpd %zmm4, %zmm7, %zmm3
165 /* Rl = T-T_Rh; -> -Rh */
166 vsubpd %zmm3, %zmm7, %zmm9
169 vaddpd %zmm9, %zmm4, %zmm6
171 /* T_Rh_Eh = T_Rh + E */
172 vaddpd %zmm5, %zmm3, %zmm9
174 /* HLL = T_Rh - T_Rh_Eh; -> -Eh */
175 vsubpd %zmm9, %zmm3, %zmm2
178 vaddpd %zmm5, %zmm4, %zmm4
181 vaddpd %zmm2, %zmm5, %zmm1
182 vmovups _clv_2(%rax), %zmm5
184 /* HLL = HLL + (((((((a7)*cq+a6)*cq+a5)*cq+a4)*cq+a3)*cq+a2)*cq+a1)*cq */
185 vfmadd213pd _clv_3(%rax), %zmm4, %zmm5
188 vaddpd %zmm6, %zmm1, %zmm7
190 /* 2^(y*(HH+HL+HLL)) starts here:
191 yH = y; Lo(yH)&=0xf8000000
193 vpandq _iHighMask(%rax), %zmm12, %zmm6
196 vsubpd %zmm6, %zmm12, %zmm2
197 vfmadd213pd _clv_4(%rax), %zmm4, %zmm5
200 vaddpd %zmm8, %zmm7, %zmm8
201 vfmadd213pd _clv_5(%rax), %zmm4, %zmm5
202 vfmadd213pd _clv_6(%rax), %zmm4, %zmm5
203 vfmadd213pd _clv_7(%rax), %zmm4, %zmm5
204 vfmadd213pd %zmm8, %zmm4, %zmm5
206 /* T_Rh_Eh_HLLhi = T_Rh_Eh + HLL */
207 vaddpd %zmm5, %zmm9, %zmm13
209 /* HLLhi = T_Rh_Eh_HLLhi - T_Rh_Eh */
210 vsubpd %zmm9, %zmm13, %zmm10
212 /* HLL = HLL - HLLhi */
213 vsubpd %zmm10, %zmm5, %zmm3
215 /* HH = T_Rh_Eh_HLLhi; Lo(HH)&=0xf8000000 */
216 vpandq _iHighMask(%rax), %zmm13, %zmm5
219 vmulpd %zmm5, %zmm6, %zmm1
221 /* HL = T_Rh_Eh_HLLhi-HH */
222 vsubpd %zmm5, %zmm13, %zmm4
223 vpsrlq $32, %zmm1, %zmm14
226 pHH = pH + *(double*)&db2p45_2p44
228 vaddpd _db2p45_2p44(%rax), %zmm1, %zmm10
229 vpmovqd %zmm14, %ymm15
230 vpandd _ABSMASK(%rax), %zmm15, %zmm14{%k1}
231 vpcmpd $5, _DOMAINRANGE(%rax), %zmm14, %k3{%k1}
233 /* T1 = ((double*)exp2_tbl)[ 2*j ] */
234 vpxord %zmm14, %zmm14, %zmm14
235 vpbroadcastd %ecx, %zmm13{%k3}{z}
236 vpord %zmm13, %zmm11, %zmm11{%k1}
237 vptestmd %zmm11, %zmm11, %k0{%k1}
239 /* pL=yL*HL+yH*HL; pL+=yL*HH */
240 vmulpd %zmm4, %zmm2, %zmm11
242 vfmadd213pd %zmm11, %zmm4, %zmm6
244 /* pHH = pHH - *(double*)&db2p45_2p44 */
245 vsubpd _db2p45_2p44(%rax), %zmm10, %zmm11
246 vpmovqd %zmm10, %ymm4
250 _n = _n & 0xffffff80;
252 Hi(_2n) = (0x3ff+_n)<<20; Lo(_2n) = 0; -> 2^n
254 vpslld $13, %zmm4, %zmm7{%k1}
256 /* j = Lo(pHH)&0x0000007f */
257 vpandd _jIndexMask(%rax), %zmm4, %zmm9{%k1}
258 vfmadd213pd %zmm6, %zmm5, %zmm2
261 vsubpd %zmm11, %zmm1, %zmm1
262 vpaddd _iOne(%rax), %zmm7, %zmm7{%k1}
264 /* t=pL+pLL; t+=pHL */
265 vfmadd231pd %zmm12, %zmm3, %zmm2
266 vpslld $4, %zmm9, %zmm9{%k1}
268 vgatherdpd 36416(%rax,%ymm9), %zmm14{%k1}
269 vpmovzxdq %ymm7, %zmm8
270 vaddpd %zmm1, %zmm2, %zmm2
271 vmovups _cev_1(%rax), %zmm1
272 vpsllq $32, %zmm8, %zmm13
273 vpternlogq $168, _ifff0000000000000(%rax), %zmm8, %zmm13
274 vfmadd213pd _cev_2(%rax), %zmm2, %zmm1
275 vmulpd %zmm14, %zmm13, %zmm15
276 vfmadd213pd _cev_3(%rax), %zmm2, %zmm1
277 vmulpd %zmm2, %zmm15, %zmm3
278 vfmadd213pd _cev_4(%rax), %zmm2, %zmm1
279 vfmadd213pd _cev_5(%rax), %zmm2, %zmm1
280 vfmadd213pd %zmm15, %zmm3, %zmm1
288 cfi_def_cfa_register (%rsp)
290 cfi_adjust_cfa_offset (-8)
296 vmovups %zmm0, 1152(%rsp)
297 vmovups %zmm12, 1216(%rsp)
298 vmovups %zmm1, 1280(%rsp)
302 kmovw %k4, 1048(%rsp)
304 kmovw %k5, 1040(%rsp)
305 kmovw %k6, 1032(%rsp)
306 kmovw %k7, 1024(%rsp)
307 vmovups %zmm16, 960(%rsp)
308 vmovups %zmm17, 896(%rsp)
309 vmovups %zmm18, 832(%rsp)
310 vmovups %zmm19, 768(%rsp)
311 vmovups %zmm20, 704(%rsp)
312 vmovups %zmm21, 640(%rsp)
313 vmovups %zmm22, 576(%rsp)
314 vmovups %zmm23, 512(%rsp)
315 vmovups %zmm24, 448(%rsp)
316 vmovups %zmm25, 384(%rsp)
317 vmovups %zmm26, 320(%rsp)
318 vmovups %zmm27, 256(%rsp)
319 vmovups %zmm28, 192(%rsp)
320 vmovups %zmm29, 128(%rsp)
321 vmovups %zmm30, 64(%rsp)
322 vmovups %zmm31, (%rsp)
323 movq %rsi, 1064(%rsp)
324 movq %rdi, 1056(%rsp)
325 movq %r12, 1096(%rsp)
326 cfi_offset_rel_rsp (12, 1096)
328 movq %r13, 1088(%rsp)
329 cfi_offset_rel_rsp (13, 1088)
331 movq %r14, 1080(%rsp)
332 cfi_offset_rel_rsp (14, 1080)
334 movq %r15, 1072(%rsp)
335 cfi_offset_rel_rsp (15, 1072)
353 kmovw 1048(%rsp), %k4
354 movq 1064(%rsp), %rsi
355 kmovw 1040(%rsp), %k5
356 movq 1056(%rsp), %rdi
357 kmovw 1032(%rsp), %k6
358 movq 1096(%rsp), %r12
360 movq 1088(%rsp), %r13
362 kmovw 1024(%rsp), %k7
363 vmovups 960(%rsp), %zmm16
364 vmovups 896(%rsp), %zmm17
365 vmovups 832(%rsp), %zmm18
366 vmovups 768(%rsp), %zmm19
367 vmovups 704(%rsp), %zmm20
368 vmovups 640(%rsp), %zmm21
369 vmovups 576(%rsp), %zmm22
370 vmovups 512(%rsp), %zmm23
371 vmovups 448(%rsp), %zmm24
372 vmovups 384(%rsp), %zmm25
373 vmovups 320(%rsp), %zmm26
374 vmovups 256(%rsp), %zmm27
375 vmovups 192(%rsp), %zmm28
376 vmovups 128(%rsp), %zmm29
377 vmovups 64(%rsp), %zmm30
378 vmovups (%rsp), %zmm31
379 movq 1080(%rsp), %r14
381 movq 1072(%rsp), %r15
383 vmovups 1280(%rsp), %zmm1
390 vmovsd 1160(%rsp,%r15), %xmm0
391 vmovsd 1224(%rsp,%r15), %xmm1
393 vmovsd %xmm0, 1288(%rsp,%r15)
399 vmovsd 1152(%rsp,%r15), %xmm0
400 vmovsd 1216(%rsp,%r15), %xmm1
402 vmovsd %xmm0, 1280(%rsp,%r15)
405 END (_ZGVeN8vv_pow_knl)
407 ENTRY (_ZGVeN8vv_pow_skx)
409 cfi_adjust_cfa_offset (8)
410 cfi_rel_offset (%rbp, 0)
412 cfi_def_cfa_register (%rbp)
415 vpsrlq $32, %zmm0, %zmm10
419 vpmovqd %zmm10, %ymm7
420 movq __svml_dpow_data@GOTPCREL(%rip), %rax
422 vpsrlq $32, %zmm6, %zmm13
424 /* i = (((Hi(x) & 0x000ffe00) + 0x00000200) >> 10); -> i = (b1..b11 + 1) / 2 */
425 vpand _iIndexMask(%rax), %ymm7, %ymm15
426 vpaddd _HIDELTA(%rax), %ymm7, %ymm2
428 /* k = Hi(x); k = k - 0x3fe7fe00; k = k >> 20 */
429 vpsubd _i3fe7fe00(%rax), %ymm7, %ymm7
430 vmovdqu _ABSMASK(%rax), %ymm4
431 vmovdqu _LORANGE(%rax), %ymm3
433 /* x1 = x; Hi(x1) = (Hi(x1)&0x000fffff)|0x3ff00000 */
434 vmovups _dbOne(%rax), %zmm11
435 vmovdqu _INF(%rax), %ymm5
436 vpaddd _iIndexAdd(%rax), %ymm15, %ymm12
437 vpmovqd %zmm13, %ymm14
438 vpternlogq $248, _iMantissaMask(%rax), %zmm0, %zmm11
439 vpsrld $10, %ymm12, %ymm10
440 vpsrad $20, %ymm7, %ymm13
442 /* Index for reciprocal table */
443 vpslld $3, %ymm10, %ymm8
445 /* Index for log2 table */
446 vpslld $4, %ymm10, %ymm1
447 vpcmpgtd %ymm2, %ymm3, %ymm3
448 vpand %ymm4, %ymm14, %ymm2
449 vpaddd _i2p20_2p19(%rax), %ymm13, %ymm14
450 vpmovzxdq %ymm14, %zmm15
451 vpsllq $32, %zmm15, %zmm7
452 vpternlogq $168, _iffffffff00000000(%rax), %zmm15, %zmm7
453 vsubpd _db2p20_2p19(%rax), %zmm7, %zmm13
454 vpxord %zmm9, %zmm9, %zmm9
455 vgatherdpd 11712(%rax,%ymm8), %zmm9{%k1}
457 /* T1 = ((double*)exp2_tbl)[ 2*j ] */
459 vpxord %zmm12, %zmm12, %zmm12
460 vpxord %zmm8, %zmm8, %zmm8
461 vgatherdpd 19968(%rax,%ymm1), %zmm12{%k2}
462 vgatherdpd 19976(%rax,%ymm1), %zmm8{%k3}
463 vmovups _iHighMask(%rax), %zmm1
465 /* x1Hi=x1; Lo(x1Hi)&=0xf8000000; x1Lo = x1-x1Hi */
466 vandpd %zmm1, %zmm11, %zmm10
467 vsubpd %zmm10, %zmm11, %zmm15
470 vmulpd %zmm9, %zmm11, %zmm11
472 /* E = -r1+__fence(x1Hi*rcp1) */
473 vfmsub213pd %zmm11, %zmm9, %zmm10
476 vaddpd _LHN(%rax), %zmm11, %zmm14
479 vfmadd213pd %zmm10, %zmm9, %zmm15
482 vaddpd %zmm12, %zmm13, %zmm9
485 vaddpd %zmm14, %zmm9, %zmm11
487 /* T_Rh_Eh = T_Rh + E */
488 vaddpd %zmm15, %zmm11, %zmm13
490 /* Rl = T-T_Rh; -> -Rh */
491 vsubpd %zmm11, %zmm9, %zmm12
493 /* HLL = T_Rh - T_Rh_Eh; -> -Eh */
494 vsubpd %zmm13, %zmm11, %zmm9
497 vaddpd %zmm12, %zmm14, %zmm10
500 vaddpd %zmm9, %zmm15, %zmm7
503 vaddpd %zmm10, %zmm7, %zmm12
505 /* 2^(y*(HH+HL+HLL)) starts here:
506 yH = y; Lo(yH)&=0xf8000000
508 vandpd %zmm1, %zmm6, %zmm7
511 vaddpd %zmm8, %zmm12, %zmm12
514 vaddpd %zmm15, %zmm14, %zmm8
515 vmovups _clv_2(%rax), %zmm14
517 /* HLL = HLL + (((((((a7)*cq+a6)*cq+a5)*cq+a4)*cq+a3)*cq+a2)*cq+a1)*cq */
518 vfmadd213pd _clv_3(%rax), %zmm8, %zmm14
519 vfmadd213pd _clv_4(%rax), %zmm8, %zmm14
520 vfmadd213pd _clv_5(%rax), %zmm8, %zmm14
521 vfmadd213pd _clv_6(%rax), %zmm8, %zmm14
522 vfmadd213pd _clv_7(%rax), %zmm8, %zmm14
523 vfmadd213pd %zmm12, %zmm8, %zmm14
526 vsubpd %zmm7, %zmm6, %zmm8
528 /* T_Rh_Eh_HLLhi = T_Rh_Eh + HLL */
529 vaddpd %zmm14, %zmm13, %zmm15
531 /* HH = T_Rh_Eh_HLLhi; Lo(HH)&=0xf8000000 */
532 vandpd %zmm1, %zmm15, %zmm11
534 /* HLLhi = T_Rh_Eh_HLLhi - T_Rh_Eh */
535 vsubpd %zmm13, %zmm15, %zmm13
538 vmulpd %zmm11, %zmm7, %zmm9
540 /* HLL = HLL - HLLhi */
541 vsubpd %zmm13, %zmm14, %zmm12
543 /* HL = T_Rh_Eh_HLLhi-HH */
544 vsubpd %zmm11, %zmm15, %zmm10
545 vpsrlq $32, %zmm9, %zmm1
546 vmovdqu _DOMAINRANGE(%rax), %ymm13
548 vpand %ymm4, %ymm1, %ymm1
549 vpcmpgtd %ymm5, %ymm2, %ymm4
550 vpcmpeqd %ymm5, %ymm2, %ymm5
551 vpternlogd $254, %ymm5, %ymm4, %ymm3
552 vpcmpgtd %ymm13, %ymm1, %ymm2
553 vpcmpeqd %ymm13, %ymm1, %ymm4
554 vpternlogd $254, %ymm4, %ymm2, %ymm3
557 vmovups _db2p45_2p44(%rax), %zmm2
559 /* pHH = pH + *(double*)&db2p45_2p44 */
560 vaddpd %zmm2, %zmm9, %zmm1
563 /* j = Lo(pHH)&0x0000007f */
564 vpand _jIndexMask(%rax), %ymm5, %ymm14
565 vpslld $4, %ymm14, %ymm15
566 vmovmskps %ymm3, %ecx
568 /* pL=yL*HL+yH*HL; pL+=yL*HH */
569 vmulpd %zmm10, %zmm8, %zmm3
570 vfmadd213pd %zmm3, %zmm10, %zmm7
571 vfmadd213pd %zmm7, %zmm11, %zmm8
576 Hi(_2n) = (0x3ff+_n)<<20; Lo(_2n) = 0; -> 2^n
578 vpslld $13, %ymm5, %ymm7
580 /* t=pL+pLL; t+=pHL */
581 vfmadd231pd %zmm6, %zmm12, %zmm8
582 vpaddd _iOne(%rax), %ymm7, %ymm10
583 vpmovzxdq %ymm10, %zmm11
584 vpsllq $32, %zmm11, %zmm3
585 vpternlogq $168, _ifff0000000000000(%rax), %zmm11, %zmm3
587 /* pHH = pHH - *(double*)&db2p45_2p44 */
588 vsubpd %zmm2, %zmm1, %zmm11
589 vmovups _cev_1(%rax), %zmm2
592 vsubpd %zmm11, %zmm9, %zmm9
593 vaddpd %zmm9, %zmm8, %zmm8
594 vfmadd213pd _cev_2(%rax), %zmm8, %zmm2
595 vfmadd213pd _cev_3(%rax), %zmm8, %zmm2
596 vfmadd213pd _cev_4(%rax), %zmm8, %zmm2
597 vfmadd213pd _cev_5(%rax), %zmm8, %zmm2
598 vpxord %zmm4, %zmm4, %zmm4
599 vgatherdpd 36416(%rax,%ymm15), %zmm4{%k1}
600 vmulpd %zmm4, %zmm3, %zmm1
601 vmulpd %zmm8, %zmm1, %zmm12
602 vfmadd213pd %zmm1, %zmm12, %zmm2
610 cfi_def_cfa_register (%rsp)
612 cfi_adjust_cfa_offset (-8)
618 vmovups %zmm0, 1152(%rsp)
619 vmovups %zmm6, 1216(%rsp)
620 vmovups %zmm2, 1280(%rsp)
625 kmovw %k4, 1048(%rsp)
626 kmovw %k5, 1040(%rsp)
627 kmovw %k6, 1032(%rsp)
628 kmovw %k7, 1024(%rsp)
629 vmovups %zmm16, 960(%rsp)
630 vmovups %zmm17, 896(%rsp)
631 vmovups %zmm18, 832(%rsp)
632 vmovups %zmm19, 768(%rsp)
633 vmovups %zmm20, 704(%rsp)
634 vmovups %zmm21, 640(%rsp)
635 vmovups %zmm22, 576(%rsp)
636 vmovups %zmm23, 512(%rsp)
637 vmovups %zmm24, 448(%rsp)
638 vmovups %zmm25, 384(%rsp)
639 vmovups %zmm26, 320(%rsp)
640 vmovups %zmm27, 256(%rsp)
641 vmovups %zmm28, 192(%rsp)
642 vmovups %zmm29, 128(%rsp)
643 vmovups %zmm30, 64(%rsp)
644 vmovups %zmm31, (%rsp)
645 movq %rsi, 1064(%rsp)
646 movq %rdi, 1056(%rsp)
647 movq %r12, 1096(%rsp)
648 cfi_offset_rel_rsp (12, 1096)
650 movq %r13, 1088(%rsp)
651 cfi_offset_rel_rsp (13, 1088)
653 movq %r14, 1080(%rsp)
654 cfi_offset_rel_rsp (14, 1080)
656 movq %r15, 1072(%rsp)
657 cfi_offset_rel_rsp (15, 1072)
675 kmovw 1048(%rsp), %k4
676 kmovw 1040(%rsp), %k5
677 kmovw 1032(%rsp), %k6
678 kmovw 1024(%rsp), %k7
679 vmovups 960(%rsp), %zmm16
680 vmovups 896(%rsp), %zmm17
681 vmovups 832(%rsp), %zmm18
682 vmovups 768(%rsp), %zmm19
683 vmovups 704(%rsp), %zmm20
684 vmovups 640(%rsp), %zmm21
685 vmovups 576(%rsp), %zmm22
686 vmovups 512(%rsp), %zmm23
687 vmovups 448(%rsp), %zmm24
688 vmovups 384(%rsp), %zmm25
689 vmovups 320(%rsp), %zmm26
690 vmovups 256(%rsp), %zmm27
691 vmovups 192(%rsp), %zmm28
692 vmovups 128(%rsp), %zmm29
693 vmovups 64(%rsp), %zmm30
694 vmovups (%rsp), %zmm31
695 vmovups 1280(%rsp), %zmm2
696 movq 1064(%rsp), %rsi
697 movq 1056(%rsp), %rdi
698 movq 1096(%rsp), %r12
700 movq 1088(%rsp), %r13
702 movq 1080(%rsp), %r14
704 movq 1072(%rsp), %r15
712 vmovsd 1224(%rsp,%r15), %xmm1
714 vmovsd 1160(%rsp,%r15), %xmm0
718 vmovsd %xmm0, 1288(%rsp,%r15)
724 vmovsd 1216(%rsp,%r15), %xmm1
726 vmovsd 1152(%rsp,%r15), %xmm0
730 vmovsd %xmm0, 1280(%rsp,%r15)
733 END (_ZGVeN8vv_pow_skx)