[glibc.git] sysdeps/ieee754/dbl-64/s_tan.c
/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001, 2009 Free Software Foundation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine. Given an IEEE double machine number x    */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: Machine arithmetic operations are performed in        */
/* round to nearest mode of IEEE 754 standard.                       */
/*                                                                   */
/*********************************************************************/

#include <errno.h>
#include "endian.h"
#include "dla.h"
#include "mpa.h"
#include "MathLib.h"
#include "math.h"

static double tanMp(double);
void __mptan(double, mp_no *, int);

double tan(double x) {
#include "utan.h"
#include "utan.tbl"
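
  /* The #included utan.h and utan.tbl provide the constants and tables
     used below: the interval bounds g1..g5 and gy1/gy2, the polynomial
     coefficients d*, a*, aa*, e0, e1, the error bounds u*.d / ua*.d /
     ub*.d, and the xfg table of reduction points.  EADD, EMULV, ADD2,
     MUL2, SUB2 and DIV2 are the double-length ("double-double")
     arithmetic primitives from dla.h; each value is carried as a pair
     (hi, lo) whose exact sum is the intended result.  */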

  int ux,i,n;
  double a,da,a2,b,db,c,dc,c1,cc1,c2,cc2,c3,cc3,fi,ffi,gi,pz,s,sy,
    t,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,w,x2,xn,xx2,y,ya,yya,z0,z,zz,z2,zz2;
  int p;
  number num,v;
  mp_no mpa,mpt1,mpt2;
#if 0
  mp_no mpy;
#endif

  int __branred(double, double *, double *);
  int __mpranred(double, mp_no *, int);

  /* x=+-INF, x=NaN */
  num.d = x;  ux = num.i[HIGH_HALF];
  if ((ux&0x7ff00000)==0x7ff00000) {
    if ((ux&0x7fffffff)==0x7ff00000)
      __set_errno (EDOM);
    return x-x;
  }

  w=(x<ZERO) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
  if (w<=g1.d)  return x;
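
  /* Each remaining case is tried in up to three stages of increasing
     cost: a first stage in ordinary double arithmetic, a second stage
     in double-length arithmetic, and finally the multiple precision
     routine tanMp().  A stage accepts its candidate result r + e only
     if a test of the form
         if ((y = r + (e - eps)) == r + (e + eps))  return y;
     succeeds, where eps is built from one of the error bounds u*.d:
     if perturbing the correction e by eps in either direction still
     rounds to the same double, that double is taken as the correctly
     rounded value of tan(x); otherwise control falls through to the
     next, more accurate stage.  */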

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w<=g2.d) {

    /* First stage */
    x2 = x*x;
    t2 = x*x2*(d3.d+x2*(d5.d+x2*(d7.d+x2*(d9.d+x2*d11.d))));
    if ((y=x+(t2-u1.d*t2)) == x+(t2+u1.d*t2))  return y;

    /* Second stage */
    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
         x2*a27.d))))));
    EMULV(x,x,x2,xx2,t1,t2,t3,t4,t5)
    ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(x ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(x ,zero.d,c2,cc2,c1,cc1,t1,t2)
    if ((y=c1+(cc1-u2.d*c1)) == c1+(cc1+u2.d*c1))  return y;
    return tanMp(x);
  }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w<=g3.d) {

    /* First stage */
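    /* Table driven step: xfg[i][0] is a reduction point xi close to w,
       xfg[i][1] (fi) is tan(xi), xfg[i][2] (gi) is cot(xi), and
       xfg[i][3] (ffi) is the low-order tail of fi used by the second
       stage.  With pz ~ tan(z) for the remainder z = w - xi, the
       addition formula in the form
           tan(xi + z) = fi + pz*(fi + gi)/(gi - pz)
       gives the result.  */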
    i = ((int) (mfftnhf.d+TWO8*w));
    z = w-xfg[i][0].d;  z2 = z*z;   s = (x<ZERO) ? MONE : ONE;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;  t2 = pz*(gi+fi)/(gi-pz);
    if ((y=fi+(t2-fi*u3.d))==fi+(t2+fi*u3.d))  return (s*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    t4 = fi*ua3.d+t3*ub3.d;
    if ((y=fi+(t2-t4))==fi+(t2+t4))  return (s*y);

    /* Second stage */
    ffi = xfg[i][3].d;
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    EMULV(z,z,z2,zz2,t1,t2,t3,t4,t5)
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zero.d,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)

    if ((y=c3+(cc3-u4.d*c3))==c3+(cc3+u4.d*c3))  return (s*y);
    return tanMp(x);
  }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w<=g4.d) {
    /* Range reduction by algorithm i */
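    /* hpinv.d is 2/pi and toint.d the 2^52+2^51 rounding constant, so
       xn becomes the integer nearest to x*2/pi and its low bit n picks
       tan (n even) or -cot (n odd).  mp1.d and mp2.d hold leading bits
       of pi/2 (chosen so that xn*mp1 and xn*mp2 are exact) and mp3.d
       the remaining tail; the reduced argument x - xn*pi/2 is
       accumulated as the double-length pair (a, da).  */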
    t = (x*hpinv.d + toint.d);
    xn = t - toint.d;
    v.d = t;
    t1 = (x - xn*mp1.d) - xn*mp2.d;
    n =v.i[LOW_HALF] & 0x00000001;
    da = xn*mp3.d;
    a=t1-da;
    da = (t1-a)-da;
    if (a<ZERO)  {ya=-a;  yya=-da;  sy=MONE;}
    else         {ya= a;  yya= da;  sy= ONE;}

    /* (IV),(V) The case 0.787 < abs(x) <= 25,    abs(y) <= 1e-7 */
    if (ya<=gy1.d)  return tanMp(x);

    /* (VI) The case 0.787 < abs(x) <= 25,    1e-7 < abs(y) <= 0.0608 */
    if (ya<=gy2.d) {
      a2 = a*a;
      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
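      /* After the reduction tan(x) equals tan(a+da) when n is even and
         -cot(a+da) = -1/tan(a+da) when n is odd; the polynomial above
         approximates tan(a+da) as a + t2, and the cot branch takes the
         reciprocal in double-length arithmetic.  */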
      if (n) {
        /* First stage -cot */
        EADD(a,t2,b,db)
        DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c+(dc-u6.d*c))==c+(dc+u6.d*c))  return (-y); }
      else {
        /* First stage tan */
        if ((y=a+(t2-u5.d*a))==a+(t2+u5.d*a))  return y; }
      /* Second stage */
      /* Range reduction by algorithm ii */
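      /* Same scheme as algorithm i, but the tail of pi/2 is carried to
         more bits: pp3.d and pp4.d extend mp1+mp2, giving a reduced
         argument (a, da) accurate enough for the second stage.  */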
      t = (x*hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn*mp1.d) - xn*mp2.d;
      n =v.i[LOW_HALF] & 0x00000001;
      da = xn*pp3.d;
      t=t1-da;
      da = (t1-t)-da;
      t1 = xn*pp4.d;
      a = t - t1;
      da = ((t-a)-t1)+da;

      /* Second stage */
      EADD(a,da,t1,t2)   a=t1;  da=t2;
      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
           x2*a27.d))))));
      ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

      if (n) {
        /* Second stage -cot */
        DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c2+(cc2-u8.d*c2)) == c2+(cc2+u8.d*c2))  return (-y); }
      else {
        /* Second stage tan */
        if ((y=c1+(cc1-u7.d*c1)) == c1+(cc1+u7.d*c1))  return y; }
      return tanMp(x);
    }

    /* (VII) The case 0.787 < abs(x) <= 25,    0.0608 < abs(y) <= 0.787 */

    /* First stage */
    i = ((int) (mfftnhf.d+TWO8*ya));
    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;
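
    /* With fi = tan(xi) and gi = cot(xi) for the table point xi, the
       odd-n branch uses cot(xi + z) = gi - pz*(fi + gi)/(fi + pz) and
       returns -sy*cot(ya + yya), while the even-n branch uses the
       tangent addition formula as in case (III) and returns
       sy*tan(ya + yya).  */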
    if (n) {
      /* -cot */
      t2 = pz*(fi+gi)/(fi+pz);
      if ((y=gi-(t2-gi*u10.d))==gi-(t2+gi*u10.d))  return (-sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      t4 = gi*ua10.d+t3*ub10.d;
      if ((y=gi-(t2-t4))==gi-(t2+t4))  return (-sy*y); }
    else {
      /* tan */
      t2 = pz*(gi+fi)/(gi-pz);
      if ((y=fi+(t2-fi*u9.d))==fi+(t2+fi*u9.d))  return (sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      t4 = fi*ua9.d+t3*ub9.d;
      if ((y=fi+(t2-t4))==fi+(t2+t4))  return (sy*y); }

    /* Second stage */
    ffi = xfg[i][3].d;
    EADD(z0,yya,z,zz)
    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

    if (n) {
      /* -cot */
      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u12.d*c3))==c3+(cc3+u12.d*c3))  return (-sy*y); }
    else {
      /* tan */
      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u11.d*c3))==c3+(cc3+u11.d*c3))  return (sy*y); }

    return tanMp(x);
  }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w<=g5.d) {
    /* Range reduction by algorithm ii */
    t = (x*hpinv.d + toint.d);
    xn = t - toint.d;
    v.d = t;
    t1 = (x - xn*mp1.d) - xn*mp2.d;
    n =v.i[LOW_HALF] & 0x00000001;
    da = xn*pp3.d;
    t=t1-da;
    da = (t1-t)-da;
    t1 = xn*pp4.d;
    a = t - t1;
    da = ((t-a)-t1)+da;
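    /* EADD renormalizes the reduced argument: t1 receives the rounded
       sum a+da and t2 the exact remainder, so (a, da) is again a proper
       double-length value for the branches below.  */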
    EADD(a,da,t1,t2)   a=t1;  da=t2;
    if (a<ZERO)  {ya=-a;  yya=-da;  sy=MONE;}
    else         {ya= a;  yya= da;  sy= ONE;}

    /* (+++) The case 25 < abs(x) <= 1e8,    abs(y) <= 1e-7 */
    if (ya<=gy1.d)  return tanMp(x);

    /* (VIII) The case 25 < abs(x) <= 1e8,    1e-7 < abs(y) <= 0.0608 */
    if (ya<=gy2.d) {
      a2 = a*a;
      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
      if (n) {
        /* First stage -cot */
        EADD(a,t2,b,db)
        DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c+(dc-u14.d*c))==c+(dc+u14.d*c))  return (-y); }
      else {
        /* First stage tan */
        if ((y=a+(t2-u13.d*a))==a+(t2+u13.d*a))  return y; }

      /* Second stage */
      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
           x2*a27.d))))));
      ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

      if (n) {
        /* Second stage -cot */
        DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c2+(cc2-u16.d*c2)) == c2+(cc2+u16.d*c2))  return (-y); }
      else {
        /* Second stage tan */
        if ((y=c1+(cc1-u15.d*c1)) == c1+(cc1+u15.d*c1))  return (y); }
      return tanMp(x);
    }

    /* (IX) The case 25 < abs(x) <= 1e8,    0.0608 < abs(y) <= 0.787 */
    /* First stage */
    i = ((int) (mfftnhf.d+TWO8*ya));
    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;

    if (n) {
      /* -cot */
      t2 = pz*(fi+gi)/(fi+pz);
      if ((y=gi-(t2-gi*u18.d))==gi-(t2+gi*u18.d))  return (-sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      t4 = gi*ua18.d+t3*ub18.d;
      if ((y=gi-(t2-t4))==gi-(t2+t4))  return (-sy*y); }
    else {
      /* tan */
      t2 = pz*(gi+fi)/(gi-pz);
      if ((y=fi+(t2-fi*u17.d))==fi+(t2+fi*u17.d))  return (sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      t4 = fi*ua17.d+t3*ub17.d;
      if ((y=fi+(t2-t4))==fi+(t2+t4))  return (sy*y); }

    /* Second stage */
    ffi = xfg[i][3].d;
    EADD(z0,yya,z,zz)
    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

    if (n) {
      /* -cot */
      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u20.d*c3))==c3+(cc3+u20.d*c3))  return (-sy*y); }
    else {
      /* tan */
      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u19.d*c3))==c3+(cc3+u19.d*c3))  return (sy*y); }
    return tanMp(x);
  }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
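  /* __branred (branred.c) handles huge arguments: it returns the
     quadrant count n and writes the reduced argument as a double-length
     pair (a, da) with x ~ n*pi/2 + a + da, using an extended precision
     representation of 2/pi.  Only the parity of n is needed here.  */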
  n = (__branred(x,&a,&da)) & 0x00000001;
  EADD(a,da,t1,t2)   a=t1;  da=t2;
  if (a<ZERO)  {ya=-a;  yya=-da;  sy=MONE;}
  else         {ya= a;  yya= da;  sy= ONE;}

  /* (+++) The case 1e8 < abs(x) < 2**1024,    abs(y) <= 1e-7 */
  if (ya<=gy1.d)  return tanMp(x);

  /* (X) The case 1e8 < abs(x) < 2**1024,    1e-7 < abs(y) <= 0.0608 */
  if (ya<=gy2.d) {
    a2 = a*a;
    t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
    if (n) {
      /* First stage -cot */
      EADD(a,t2,b,db)
      DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c+(dc-u22.d*c))==c+(dc+u22.d*c))  return (-y); }
    else {
      /* First stage tan */
      if ((y=a+(t2-u21.d*a))==a+(t2+u21.d*a))  return y; }

    /* Second stage */
    /* Reduction by algorithm iv */
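    /* The reduction is redone in multiple precision: __mpranred returns
       the quadrant count (only its parity is used) and stores the
       reduced argument in mpa with working precision p; the
       double-length pair (a, da) is then recovered from mpa via
       __mp_dbl, __dbl_mp and __sub.  */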
    p=10;    n = (__mpranred(x,&mpa,p)) & 0x00000001;
    __mp_dbl(&mpa,&a,p);        __dbl_mp(a,&mpt1,p);
    __sub(&mpa,&mpt1,&mpt2,p);  __mp_dbl(&mpt2,&da,p);

    MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
         x2*a27.d))))));
    ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

    if (n) {
      /* Second stage -cot */
      DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c2+(cc2-u24.d*c2)) == c2+(cc2+u24.d*c2))  return (-y); }
    else {
      /* Second stage tan */
      if ((y=c1+(cc1-u23.d*c1)) == c1+(cc1+u23.d*c1))  return y; }
    return tanMp(x);
  }

  /* (XI) The case 1e8 < abs(x) < 2**1024,    0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d+TWO8*ya));
  z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
  pz = z+z*z2*(e0.d+z2*e1.d);
  fi = xfg[i][1].d;  gi = xfg[i][2].d;

  if (n) {
    /* -cot */
    t2 = pz*(fi+gi)/(fi+pz);
    if ((y=gi-(t2-gi*u26.d))==gi-(t2+gi*u26.d))  return (-sy*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    t4 = gi*ua26.d+t3*ub26.d;
    if ((y=gi-(t2-t4))==gi-(t2+t4))  return (-sy*y); }
  else {
    /* tan */
    t2 = pz*(gi+fi)/(gi-pz);
    if ((y=fi+(t2-fi*u25.d))==fi+(t2+fi*u25.d))  return (sy*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    t4 = fi*ua25.d+t3*ub25.d;
    if ((y=fi+(t2-t4))==fi+(t2+t4))  return (sy*y); }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD(z0,yya,z,zz)
  MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
  c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
  ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
  ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
  MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
  ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

  ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
  MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
  SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

  if (n) {
    /* -cot */
    DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
    if ((y=c3+(cc3-u28.d*c3))==c3+(cc3+u28.d*c3))  return (-sy*y); }
  else {
    /* tan */
    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
    if ((y=c3+(cc3-u27.d*c3))==c3+(cc3+u27.d*c3))  return (sy*y); }
  return tanMp(x);
}

/* multiple precision stage                                          */
/* Convert x to multi precision number, compute tan(x) by mptan()    */
/* routine and convert result back to double                         */
static double tanMp(double x)
{
  int p;
  double y;
  mp_no mpy;
  p=32;
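  /* p = 32 selects the working precision (number of mp digits from
     mpa.h) used by __mptan and __mp_dbl.  */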
  __mptan(x, &mpy, p);
  __mp_dbl(&mpy,&y,p);
  return y;
}

#ifdef NO_LONG_DOUBLE
weak_alias (tan, tanl)
#endif