/* Function pow vectorized with AVX2.
   Copyright (C) 2014-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <sysdep.h>
#include "svml_d_pow_data.h"
	.section .text.avx2, "ax", @progbits
ENTRY (_ZGVdN4vv_pow_avx2)
/*
   ALGORITHM DESCRIPTION:

     1) Calculating log2|x|
	Here we use the following formula.
	Let |x|=2^k1*X1, where k1 is integer, 1<=X1<2,
	Let C ~= 1/ln(2),
	Rcp1 ~= 1/X1, X2=Rcp1*X1,
	Rcp2 ~= 1/X2, X3=Rcp2*X2,
	Rcp3 ~= 1/X3, Rcp3C ~= C/X3.
	Then

	  log2|x| = k1 + log2(1/Rcp1) + log2(1/Rcp2) + log2(C/Rcp3C) +
		    log2(X1*Rcp1*Rcp2*Rcp3C/C),

	where X1*Rcp1*Rcp2*Rcp3C = C*(1+q), q is very small.

	The values of Rcp1, log2(1/Rcp1), Rcp2, log2(1/Rcp2),
	Rcp3C, log2(C/Rcp3C) are taken from tables.
	Values of Rcp1, Rcp2, Rcp3C are such that RcpC=Rcp1*Rcp2*Rcp3C
	is exactly represented in target precision.

	  log2(X1*Rcp1*Rcp2*Rcp3C/C) = log2(1+q) = ln(1+q)/ln2 =
	       = 1/(ln2)*q - 1/(2ln2)*q^2 + 1/(3ln2)*q^3 - ... =
	       = 1/(C*ln2)*cq - 1/(2*C^2*ln2)*cq^2 + 1/(3*C^3*ln2)*cq^3 - ... =
	       = (1 + a1)*cq + a2*cq^2 + a3*cq^3 + ...,
	  where
	       cq = X1*Rcp1*Rcp2*Rcp3C-C,
	       a1 = 1/(C*ln(2))-1 is small,
	       a2 = 1/(2*C^2*ln2),
	       a3 = 1/(3*C^3*ln2),
	       ...

	We get three parts of the log2 result: HH+HL+HLL ~= log2|x|.

     2) Calculation of y*(HH+HL+HLL).
	Split y into yHi+yLo.
	Get high PH and medium PL parts of y*log2|x|.
	Get low PLL part of y*log2|x|.
	Now we have PH+PL+PLL ~= y*log2|x|.

     3) Calculation of 2^(PH+PL+PLL).
	The mathematical idea of computing 2^(PH+PL+PLL) is the following.
	Let's represent PH+PL+PLL in the form N + j/2^expK + Z,
	where expK=7 in this implementation, N and j are integers,
	0<=j<=2^expK-1, |Z|<2^(-expK-1).
	Hence 2^(PH+PL+PLL) ~= 2^N * 2^(j/2^expK) * 2^Z,
	where 2^(j/2^expK) is stored in a table, and
	2^Z ~= 1 + B1*Z + B2*Z^2 + ... + B5*Z^5.

	We compute 2^(PH+PL+PLL) as follows.
	Break PH into PHH + PHL, where PHH = N + j/2^expK.
	Z = PHL + PL + PLL
	Exp2Poly = B1*Z + B2*Z^2 + ... + B5*Z^5
	Get 2^(j/2^expK) from the table in the form THI+TLO.
	Now we have 2^(PH+PL+PLL) ~= 2^N * (THI + TLO) * (1 + Exp2Poly).

	Get the significand of 2^(PH+PL+PLL) in the form ResHi+ResLo:
	  ResHi := THI
	  ResLo := THI * Exp2Poly + TLO

	Get the exponent ERes of the result:
	  Res := ResHi + ResLo;
	  Result := ex(Res) + N.  */
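
/* The following scalar C sketch (illustrative only, not part of the
   build) mirrors the three steps above.  Standard libm calls stand in
   for the __svml_dpow_data table lookups, a truncated 2^z series
   stands in for the B1..B5 coefficients, and sign/special-case
   handling is omitted; every helper name here is an assumption for
   illustration only.

     #include <math.h>

     static double pow_sketch (double x, double y)
     {
       // 1) log2|x| -- the real code builds HH+HL+HLL from three
       //    reciprocal tables; log2() stands in for that here.
       double hh = log2 (fabs (x));

       // 2) y*log2|x| split into high and low parts: ph + pl == y*hh.
       double ph = y * hh;
       double pl = fma (y, hh, -ph);

       // 3) 2^(ph+pl): write ph+pl = n + j/2^expK + z, |z| small.
       const int expK = 7;
       long mi = lround (ph * (1 << expK));        // mi = n*2^expK + j
       double z = ph - (double) mi / (1 << expK) + pl;
       long n = mi >> expK;                        // arithmetic shift
       int j = (int) (mi - (n << expK));           // 0 <= j < 2^expK

       double t = exp2 ((double) j / (1 << expK)); // tabulated THI+TLO
       double l = z * M_LN2;                       // 2^z = e^(z*ln2)
       double p = l * (1 + l / 2 * (1 + l / 3 * (1 + l / 4)));
       return ldexp (t + t * p, (int) n);          // 2^n * t * (1+poly)
     }  */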
	pushq	%rbp
	cfi_adjust_cfa_offset (8)
	cfi_rel_offset (%rbp, 0)
	movq	%rsp, %rbp
	cfi_def_cfa_register (%rbp)
	andq	$-64, %rsp
	subq	$448, %rsp
	movq	__svml_dpow_data@GOTPCREL(%rip), %rax
	vmovups	%ymm11, 160(%rsp)
	vmovups	%ymm8, 224(%rsp)
	vmovups	%ymm10, 352(%rsp)
	vmovups	%ymm9, 384(%rsp)
	vmovups	%ymm13, 288(%rsp)
	vmovapd	%ymm1, %ymm11
	vxorpd	%ymm1, %ymm1, %ymm1

	/* Collect Hi(x) of all four lanes into one xmm register.  */
	vextracti128 $1, %ymm0, %xmm5
	vshufps	$221, %xmm5, %xmm0, %xmm5
	/* i = (((Hi(x) & 0x000ffe00) + 0x00000200) >> 10); -> i = (b1..b11 + 1) / 2 */
	vandps	_iIndexMask(%rax), %xmm5, %xmm3
	vpaddd	_iIndexAdd(%rax), %xmm3, %xmm6
	vpsrld	$10, %xmm6, %xmm8

	/* Index for reciprocal table */
	vpslld	$3, %xmm8, %xmm9

	/* Index for log2 table */
	vpslld	$4, %xmm8, %xmm6
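
/* In scalar terms the index computation above is (illustrative C, with
   hi denoting the upper 32 bits of x):

     unsigned i = ((hi & 0x000ffe00u) + 0x00000200u) >> 10;
     // i*8 indexes the 8-byte reciprocal entries, i*16 the paired
     // 16-byte log2 entries -- hence the vpslld by 3 and by 4.  */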
	/* x1 = x; Hi(x1) = (Hi(x1)&0x000fffff)|0x3ff00000 */
	vandpd	_iMantissaMask(%rax), %ymm0, %ymm4
	vorpd	_dbOne(%rax), %ymm4, %ymm13
	vpcmpeqd %ymm4, %ymm4, %ymm4
	vpcmpeqd %ymm8, %ymm8, %ymm8
	/* k = Hi(x); k = k - 0x3fe7fe00; k = k >> 20 */
	vpsubd	_i3fe7fe00(%rax), %xmm5, %xmm3
	vpaddd	_HIDELTA(%rax), %xmm5, %xmm5

	/* Hi(y) of all four lanes, and |Hi(y)| for the range checks.  */
	vextracti128 $1, %ymm11, %xmm7
	vshufps	$221, %xmm7, %xmm11, %xmm2
	vpand	_ABSMASK(%rax), %xmm2, %xmm10
	vpcmpeqd %ymm2, %ymm2, %ymm2

	/* rcp1 gathered from the reciprocal table (8-byte entries).  */
	vgatherdpd %ymm2, 11712(%rax,%xmm9), %ymm1
	vmovups	_LORANGE(%rax), %xmm7
	vxorpd	%ymm2, %ymm2, %ymm2

	/* L1hi and L1lo gathered from the log2 table (16-byte entries).  */
	vgatherdpd %ymm4, 19968(%rax,%xmm6), %ymm2
	vxorpd	%ymm4, %ymm4, %ymm4
	vgatherdpd %ymm8, 19976(%rax,%xmm6), %ymm4
	vpsrad	$20, %xmm3, %xmm6
	vpaddd	_i2p20_2p19(%rax), %xmm6, %xmm9

	/* Widen the 32-bit k words into the four 64-bit lanes.  */
	vpshufd	$80, %xmm9, %xmm8
	vpshufd	$250, %xmm9, %xmm3
	/* x1Hi=x1; Lo(x1Hi)&=0xf8000000; x1Lo = x1-x1Hi */
	vandpd	_iHighMask(%rax), %ymm13, %ymm9
	vinserti128 $1, %xmm3, %ymm8, %ymm6
	vandpd	_iffffffff00000000(%rax), %ymm6, %ymm8
	/* r1 = x1*rcp1 */
	vmulpd	%ymm1, %ymm13, %ymm6
	vsubpd	%ymm9, %ymm13, %ymm3
	vsubpd	_db2p20_2p19(%rax), %ymm8, %ymm8

	/* cq = c+r1 */
	vaddpd	_LHN(%rax), %ymm6, %ymm13

	/* E = -r1+__fence(x1Hi*rcp1) */
	vfmsub213pd %ymm6, %ymm1, %ymm9

	/* E = E+x1Lo*rcp1 */
	vfmadd213pd %ymm9, %ymm1, %ymm3

	/* T = k + L1hi */
	vaddpd	%ymm2, %ymm8, %ymm1

	/* T_Rh = T + cq */
	vaddpd	%ymm13, %ymm1, %ymm8

	/* Rl = T-T_Rh; -> -Rh */
	vsubpd	%ymm8, %ymm1, %ymm6

	/* Rl = Rl+cq */
	vaddpd	%ymm6, %ymm13, %ymm1

	/* T_Rh_Eh = T_Rh + E */
	vaddpd	%ymm3, %ymm8, %ymm6

	/* cq = cq + E */
	vaddpd	%ymm3, %ymm13, %ymm13

	/* HLL = T_Rh - T_Rh_Eh; -> -Eh */
	vsubpd	%ymm6, %ymm8, %ymm9

	/* HLL += E; -> El */
	vaddpd	%ymm9, %ymm3, %ymm2

	/* HLL += Rl */
	vaddpd	%ymm1, %ymm2, %ymm8

	/* HLL += L1lo */
	vaddpd	%ymm4, %ymm8, %ymm4
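
/* The T_Rh/Rl pair above is a Fast2Sum step; in scalar C terms
   (illustrative),

     double s = t + cq;          // T_Rh
     double e = (t - s) + cq;    // Rl, the exact rounding error

   so the running log2 sum loses no precision to the addition.  */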
	vmovupd	_clv_2(%rax), %ymm8

	/* HLL = HLL + (((((((a7)*cq+a6)*cq+a5)*cq+a4)*cq+a3)*cq+a2)*cq+a1)*cq */
	vfmadd213pd _clv_3(%rax), %ymm13, %ymm8
	vfmadd213pd _clv_4(%rax), %ymm13, %ymm8
	vfmadd213pd _clv_5(%rax), %ymm13, %ymm8
	vfmadd213pd _clv_6(%rax), %ymm13, %ymm8
	vfmadd213pd _clv_7(%rax), %ymm13, %ymm8
	vfmadd213pd %ymm4, %ymm13, %ymm8

	/* T_Rh_Eh_HLLhi = T_Rh_Eh + HLL */
	vaddpd	%ymm8, %ymm6, %ymm9
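
/* Each vfmadd213pd above performs one Horner step acc = acc*cq + c,
   so in scalar C (illustrative; c2..c7 stand for _clv_2.._clv_7):

     double acc = c2;
     acc = fma (acc, cq, c3);
     acc = fma (acc, cq, c4);
     acc = fma (acc, cq, c5);
     acc = fma (acc, cq, c6);
     acc = fma (acc, cq, c7);
     hll = fma (acc, cq, hll);   // the final step folds into HLL  */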
	/* HH = T_Rh_Eh_HLLhi; Lo(HH)&=0xf8000000 */
	vandpd	_iHighMask(%rax), %ymm9, %ymm2
	/* 2^(y*(HH+HL+HLL)) starts here:
	   yH = y; Lo(yH)&=0xf8000000 */
	vandpd	_iHighMask(%rax), %ymm11, %ymm1
	/* HLLhi = T_Rh_Eh_HLLhi - T_Rh_Eh */
	vsubpd	%ymm6, %ymm9, %ymm13

	/* HL = T_Rh_Eh_HLLhi-HH */
	vsubpd	%ymm2, %ymm9, %ymm4

	/* pH = yH*HH */
	vmulpd	%ymm2, %ymm1, %ymm9

	/* HLL = HLL - HLLhi */
	vsubpd	%ymm13, %ymm8, %ymm6

	/* yL = y-yH */
	vsubpd	%ymm1, %ymm11, %ymm8
	/* Range checks: flag lanes whose x, y or y*log2|x| fall outside
	   the fast path's domain; they go to the scalar fallback.  */
	vextracti128 $1, %ymm9, %xmm3
	vshufps	$221, %xmm3, %xmm9, %xmm13
	vpand	_ABSMASK(%rax), %xmm13, %xmm3
	vpcmpgtd %xmm5, %xmm7, %xmm13
	vpcmpgtd _INF(%rax), %xmm10, %xmm7
	vpcmpeqd _INF(%rax), %xmm10, %xmm10
	vpor	%xmm10, %xmm7, %xmm7
	vpor	%xmm7, %xmm13, %xmm5

	/* pL = yL*HL+yH*HL; pL += yL*HH */
	vmulpd	%ymm4, %ymm8, %ymm7
	vpcmpgtd _DOMAINRANGE(%rax), %xmm3, %xmm13
	vpcmpeqd _DOMAINRANGE(%rax), %xmm3, %xmm10
	vpor	%xmm10, %xmm13, %xmm3
	vpor	%xmm3, %xmm5, %xmm13
	vfmadd213pd %ymm7, %ymm4, %ymm1
	/* pHH = pH + *(double*)&db2p45_2p44 */
	vaddpd	_db2p45_2p44(%rax), %ymm9, %ymm7
	vmovmskps %xmm13, %ecx
	vfmadd213pd %ymm1, %ymm2, %ymm8

	/* t = pL+pLL; t += pHL */
	vfmadd231pd %ymm11, %ymm6, %ymm8

	/* Collect Lo(pHH) of all four lanes.  */
	vextracti128 $1, %ymm7, %xmm1
	vshufps	$136, %xmm1, %xmm7, %xmm10
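
/* Scalar sketch of the splitting above (illustrative): yH and HH have
   their low 27 mantissa bits cleared, so yH*HH is exact, and the
   rounding error lives entirely in the cross terms:

     double ph = yh * hh;               // exact: 26-bit by 26-bit
     double pl = fma (yh, hl, yl * hl);
     pl = fma (yl, hh, pl);
     double t  = fma (y, hll, pl);      // t = pL + pLL  */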
	/* _n = Lo(pHH);
	   _n = _n & 0xffffff80;
	   _n = _n >> 7;
	   Hi(_2n) = (0x3ff+_n)<<20; Lo(_2n) = 0; -> 2^n */
	vpslld	$13, %xmm10, %xmm2
	vpaddd	_iOne(%rax), %xmm2, %xmm13
	vpshufd	$80, %xmm13, %xmm4
	vpshufd	$250, %xmm13, %xmm1

	/* j = Lo(pHH)&0x0000007f */
	vandps	_jIndexMask(%rax), %xmm10, %xmm3

	/* T1 = ((double*)exp2_tbl)[ 2*j ] */
	vpcmpeqd %ymm10, %ymm10, %ymm10
	vpslld	$4, %xmm3, %xmm5
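
/* The 2^n construction in scalar C terms (illustrative): with n held
   in bits 7 and up of Lo(pHH), shifting left by 13 lands it at bit 20,
   the exponent field of a double's high word:

     #include <stdint.h>
     #include <string.h>

     uint32_t hi = (lo_phh << 13) + 0x3ff00000u;  // _iOne adds the bias
     hi &= 0xfff00000u;            // _ifff0000000000000 drops the j bits
     uint64_t bits = (uint64_t) hi << 32;         // Lo(_2n) = 0
     double two_n;
     memcpy (&two_n, &bits, sizeof two_n);        // two_n == 2^n  */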
	/* pHH = pHH - *(double*)&db2p45_2p44 */
	vsubpd	_db2p45_2p44(%rax), %ymm7, %ymm7

	/* pHL = pH - pHH */
	vsubpd	%ymm7, %ymm9, %ymm9

	/* Z = t + pHL */
	vaddpd	%ymm9, %ymm8, %ymm6

	/* 2^N assembled from the widened exponent words.  */
	vinserti128 $1, %xmm1, %ymm4, %ymm2
	vxorpd	%ymm1, %ymm1, %ymm1

	/* THI = 2^(j/2^expK) gathered from the exp2 table.  */
	vgatherdpd %ymm10, 36416(%rax,%xmm5), %ymm1
	vandpd	_ifff0000000000000(%rax), %ymm2, %ymm13
	vmovupd	_cev_1(%rax), %ymm2

	/* THI*2^N, interleaved with the Exp2Poly evaluation in Z.  */
	vmulpd	%ymm1, %ymm13, %ymm1
	vfmadd213pd _cev_2(%rax), %ymm6, %ymm2
	vmulpd	%ymm6, %ymm1, %ymm8
	vfmadd213pd _cev_3(%rax), %ymm6, %ymm2
	vfmadd213pd _cev_4(%rax), %ymm6, %ymm2
	vfmadd213pd _cev_5(%rax), %ymm6, %ymm2
	vfmadd213pd %ymm1, %ymm8, %ymm2
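
/* The final vfmadd213pd assembles the result; in scalar C terms
   (illustrative), with t2n = THI*2^N, z = Z and p = Exp2Poly(Z):

     double res = fma (t2n * z, p, t2n);  // 2^N * THI * (1 + Z*poly)  */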
	testl	%ecx, %ecx
	jne	.LBL_1_3

.LBL_1_2:
	cfi_remember_state
	vmovups	224(%rsp), %ymm8
	vmovups	384(%rsp), %ymm9
	vmovups	352(%rsp), %ymm10
	vmovups	160(%rsp), %ymm11
	vmovups	288(%rsp), %ymm13
	vmovdqa	%ymm2, %ymm0
	movq	%rbp, %rsp
	cfi_def_cfa_register (%rsp)
	popq	%rbp
	cfi_adjust_cfa_offset (-8)
	cfi_restore (%rbp)
	ret
.LBL_1_3:
	cfi_restore_state

	/* Fallback path: spill x, y and the fast-path result, then fix
	   up the flagged lanes with calls to scalar pow.  */
	vmovupd	%ymm0, 192(%rsp)
	vmovupd	%ymm11, 256(%rsp)
	vmovupd	%ymm2, 320(%rsp)
	je	.LBL_1_2

	xorb	%dl, %dl
	xorl	%eax, %eax
	vmovups	%ymm12, 64(%rsp)
	vmovups	%ymm14, 32(%rsp)
	vmovups	%ymm15, (%rsp)
	movq	%rsi, 104(%rsp)
	movq	%rdi, 96(%rsp)
	movq	%r12, 136(%rsp)
	cfi_offset_rel_rsp (12, 136)
	movb	%dl, %r12b
	movq	%r13, 128(%rsp)
	cfi_offset_rel_rsp (13, 128)
	movl	%ecx, %r13d
	movq	%r14, 120(%rsp)
	cfi_offset_rel_rsp (14, 120)
	movl	%eax, %r14d
	movq	%r15, 112(%rsp)
	cfi_offset_rel_rsp (15, 112)
	cfi_remember_state

	/* Iterate over the special-case mask in %r13d; each set bit
	   dispatches one lane to the scalar pow calls below.  */
.LBL_1_6:
	btl	%r14d, %r13d
	jc	.LBL_1_12

.LBL_1_7:
	incb	%r12b
	cmpb	$16, %r12b
	ja	.LBL_1_10

.LBL_1_8:
	incl	%r14d
	cmpl	$4, %r14d
	jl	.LBL_1_6

.LBL_1_10:
	vmovups	64(%rsp), %ymm12
	vmovups	32(%rsp), %ymm14
	vmovups	(%rsp), %ymm15
	vmovupd	320(%rsp), %ymm2
	movq	104(%rsp), %rsi
	movq	96(%rsp), %rdi
	movq	136(%rsp), %r12
	cfi_restore (%r12)
	movq	128(%rsp), %r13
	cfi_restore (%r13)
	movq	120(%rsp), %r14
	cfi_restore (%r14)
	movq	112(%rsp), %r15
	cfi_restore (%r15)
	jmp	.LBL_1_2

	/* Scalar pow for one flagged lane.  */
.LBL_1_12:
	movzbl	%r12b, %r15d
	shlq	$3, %r15
	vmovsd	200(%rsp,%r15), %xmm0
	vmovsd	264(%rsp,%r15), %xmm1
	vzeroupper

	call	JUMPTARGET(pow)

	vmovsd	%xmm0, 328(%rsp,%r15)
	jmp	.LBL_1_8

.LBL_1_13:
	movzbl	%r12b, %r15d
	shlq	$3, %r15
	vmovsd	192(%rsp,%r15), %xmm0
	vmovsd	256(%rsp,%r15), %xmm1
	vzeroupper

	call	JUMPTARGET(pow)

	vmovsd	%xmm0, 320(%rsp,%r15)
	jmp	.LBL_1_7
END (_ZGVdN4vv_pow_avx2)