sysdeps/ieee754/dbl-64/s_tan.c
/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001, 2009 Free Software Foundation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine. Given an IEEE double machine number x    */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: Machine arithmetic operations are performed in        */
/* round to nearest mode of IEEE 754 standard.                       */
/*                                                                   */
/*********************************************************************/
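
/* Overview of the method used below: each argument range is first handled
   by a fast approximation whose result is bracketed by its error bound
   (the u*.d constants); if both endpoints of the bracket round to the same
   double, that value is returned as the correctly rounded result.
   Otherwise a more accurate second stage re-evaluates the polynomial in
   double-double arithmetic, and tanMp() serves as the final multiple
   precision fallback.  */
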
#include <errno.h>
#include "endian.h"
#include "dla.h"
#include "mpa.h"
#include "MathLib.h"
#include "math.h"

static double tanMp(double);
void __mptan(double, mp_no *, int);

double tan(double x) {
#include "utan.h"
#include "utan.tbl"

  int ux,i,n;
  double a,da,a2,b,db,c,dc,c1,cc1,c2,cc2,c3,cc3,fi,ffi,gi,pz,s,sy,
         t,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,w,x2,xn,xx2,y,ya,yya,z0,z,zz,z2,zz2;
  int p;
  number num,v;
  mp_no mpa,mpt1,mpt2;
#if 0
  mp_no mpy;
#endif

  int __branred(double, double *, double *);
  int __mpranred(double, mp_no *, int);
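
  /* The EADD/ADD2/SUB2/MUL2/DIV2/EMULV macros (see dla.h) implement
     double-double arithmetic: a value is carried as a pair (r, rr) of
     doubles whose unevaluated sum r + rr holds roughly twice the working
     precision; e.g. EMULV(x,x,x2,xx2,...) below leaves the square of x
     in the pair x2 + xx2.  */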

  /* x=+-INF, x=NaN */
  num.d = x;  ux = num.i[HIGH_HALF];
  if ((ux&0x7ff00000)==0x7ff00000) {
    if ((ux&0x7fffffff)==0x7ff00000)
      __set_errno (EDOM);
    return x-x;
  }
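
  /* Note: for x = +-Inf the expression x - x yields a NaN and errno has
     been set to EDOM (the C domain error for tan); for a NaN argument the
     NaN is simply propagated and errno is left untouched.  */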

  w=(x<ZERO) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
  if (w<=g1.d) return x;
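
  /* The early return above is justified because for |x| <= 1.259e-8 the
     first omitted term of tan(x) = x + x^3/3 + ... satisfies x^2/3 < 2^-54,
     i.e. it is below half an ulp of x, so x is already the correctly
     rounded value.  */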

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w<=g2.d) {

    /* First stage */
    x2 = x*x;
    t2 = x*x2*(d3.d+x2*(d5.d+x2*(d7.d+x2*(d9.d+x2*d11.d))));
    if ((y=x+(t2-u1.d*t2)) == x+(t2+u1.d*t2)) return y;
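
    /* The test above is the rounding check used throughout this file:
       u1.d*t2 bounds the error of the first-stage polynomial, and if the
       lower bracket x+(t2-u1.d*t2) and the upper bracket x+(t2+u1.d*t2)
       round to the same double, that double is returned immediately.  */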

    /* Second stage */
    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
         x2*a27.d))))));
    EMULV(x,x,x2,xx2,t1,t2,t3,t4,t5)
    ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(x ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(x ,zero.d,c2,cc2,c1,cc1,t1,t2)
    if ((y=c1+(cc1-u2.d*c1)) == c1+(cc1+u2.d*c1)) return y;
    return tanMp(x);
  }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w<=g3.d) {

    /* First stage */
    i = ((int) (mfftnhf.d+TWO8*w));
    z = w-xfg[i][0].d;  z2 = z*z;  s = (x<ZERO) ? MONE : ONE;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;  t2 = pz*(gi+fi)/(gi-pz);
    if ((y=fi+(t2-fi*u3.d))==fi+(t2+fi*u3.d)) return (s*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    if ((y=fi+(t2-(t4=fi*ua3.d+t3*ub3.d)))==fi+(t2+t4)) return (s*y);

    /* Second stage */
    ffi = xfg[i][3].d;
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    EMULV(z,z,z2,zz2,t1,t2,t3,t4,t5)
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zero.d,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)

    if ((y=c3+(cc3-u4.d*c3))==c3+(cc3+u4.d*c3)) return (s*y);
    return tanMp(x);
  }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w<=g4.d) {
    /* Range reduction by algorithm i */
    t = (x*hpinv.d + toint.d);
    xn = t - toint.d;
    v.d = t;
    t1 = (x - xn*mp1.d) - xn*mp2.d;
    n = v.i[LOW_HALF] & 0x00000001;
    da = xn*mp3.d;
    a = t1-da;
    da = (t1-a)-da;
    if (a<ZERO) {ya=-a;  yya=-da;  sy=MONE;}
    else        {ya= a;  yya= da;  sy= ONE;}
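
    /* Reduction above (algorithm i): xn is the nearest integer to x*2/pi
       (hpinv.d), and xn*pi/2 is subtracted using pi/2 split into the
       exactly representable pieces mp1+mp2+mp3, leaving the reduced
       argument as the pair a+da.  The parity of xn (n) selects between
       tan (n even) and -cot (n odd); sy records the sign of the reduced
       argument.  */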

    /* (IV),(V) The case 0.787 < abs(x) <= 25,    abs(y) <= 1e-7 */
    if (ya<=gy1.d) return tanMp(x);

    /* (VI) The case 0.787 < abs(x) <= 25,    1e-7 < abs(y) <= 0.0608 */
    if (ya<=gy2.d) {
      a2 = a*a;
      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
      if (n) {
        /* First stage -cot */
        EADD(a,t2,b,db)
        DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c+(dc-u6.d*c))==c+(dc+u6.d*c)) return (-y);
      }
      else {
        /* First stage tan */
        if ((y=a+(t2-u5.d*a))==a+(t2+u5.d*a)) return y;
      }
      /* Second stage */
      /* Range reduction by algorithm ii */
      t = (x*hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn*mp1.d) - xn*mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn*pp3.d;
      t = t1-da;
      da = (t1-t)-da;
      t1 = xn*pp4.d;
      a = t - t1;
      da = ((t-a)-t1)+da;

      /* Second stage */
      EADD(a,da,t1,t2)   a=t1;  da=t2;
      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
           x2*a27.d))))));
      ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

      if (n) {
        /* Second stage -cot */
        DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c2+(cc2-u8.d*c2)) == c2+(cc2+u8.d*c2)) return (-y);
      }
      else {
        /* Second stage tan */
        if ((y=c1+(cc1-u7.d*c1)) == c1+(cc1+u7.d*c1)) return y;
      }
      return tanMp(x);
    }

    /* (VII) The case 0.787 < abs(x) <= 25,    0.0608 < abs(y) <= 0.787 */

    /* First stage */
    i = ((int) (mfftnhf.d+TWO8*ya));
    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;

    if (n) {
      /* -cot */
      t2 = pz*(fi+gi)/(fi+pz);
      if ((y=gi-(t2-gi*u10.d))==gi-(t2+gi*u10.d)) return (-sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      if ((y=gi-(t2-(t4=gi*ua10.d+t3*ub10.d)))==gi-(t2+t4)) return (-sy*y);
    }
    else {
      /* tan */
      t2 = pz*(gi+fi)/(gi-pz);
      if ((y=fi+(t2-fi*u9.d))==fi+(t2+fi*u9.d)) return (sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      if ((y=fi+(t2-(t4=fi*ua9.d+t3*ub9.d)))==fi+(t2+t4)) return (sy*y);
    }

    /* Second stage */
    ffi = xfg[i][3].d;
    EADD(z0,yya,z,zz)
    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

    if (n) {
      /* -cot */
      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u12.d*c3))==c3+(cc3+u12.d*c3)) return (-sy*y);
    }
    else {
      /* tan */
      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u11.d*c3))==c3+(cc3+u11.d*c3)) return (sy*y);
    }
    return tanMp(x);
  }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w<=g5.d) {
    /* Range reduction by algorithm ii */
    t = (x*hpinv.d + toint.d);
    xn = t - toint.d;
    v.d = t;
    t1 = (x - xn*mp1.d) - xn*mp2.d;
    n = v.i[LOW_HALF] & 0x00000001;
    da = xn*pp3.d;
    t = t1-da;
    da = (t1-t)-da;
    t1 = xn*pp4.d;
    a = t - t1;
    da = ((t-a)-t1)+da;
    EADD(a,da,t1,t2)   a=t1;  da=t2;
    if (a<ZERO) {ya=-a;  yya=-da;  sy=MONE;}
    else        {ya= a;  yya= da;  sy= ONE;}

    /* (+++) The case 25 < abs(x) <= 1e8,    abs(y) <= 1e-7 */
    if (ya<=gy1.d) return tanMp(x);

    /* (VIII) The case 25 < abs(x) <= 1e8,    1e-7 < abs(y) <= 0.0608 */
    if (ya<=gy2.d) {
      a2 = a*a;
      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
      if (n) {
        /* First stage -cot */
        EADD(a,t2,b,db)
        DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c+(dc-u14.d*c))==c+(dc+u14.d*c)) return (-y);
      }
      else {
        /* First stage tan */
        if ((y=a+(t2-u13.d*a))==a+(t2+u13.d*a)) return y;
      }

      /* Second stage */
      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
           x2*a27.d))))));
      ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

      if (n) {
        /* Second stage -cot */
        DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c2+(cc2-u16.d*c2)) == c2+(cc2+u16.d*c2)) return (-y);
      }
      else {
        /* Second stage tan */
        if ((y=c1+(cc1-u15.d*c1)) == c1+(cc1+u15.d*c1)) return (y);
      }
      return tanMp(x);
    }

    /* (IX) The case 25 < abs(x) <= 1e8,    0.0608 < abs(y) <= 0.787 */

    /* First stage */
    i = ((int) (mfftnhf.d+TWO8*ya));
    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;

    if (n) {
      /* -cot */
      t2 = pz*(fi+gi)/(fi+pz);
      if ((y=gi-(t2-gi*u18.d))==gi-(t2+gi*u18.d)) return (-sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      if ((y=gi-(t2-(t4=gi*ua18.d+t3*ub18.d)))==gi-(t2+t4)) return (-sy*y);
    }
    else {
      /* tan */
      t2 = pz*(gi+fi)/(gi-pz);
      if ((y=fi+(t2-fi*u17.d))==fi+(t2+fi*u17.d)) return (sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      if ((y=fi+(t2-(t4=fi*ua17.d+t3*ub17.d)))==fi+(t2+t4)) return (sy*y);
    }

    /* Second stage */
    ffi = xfg[i][3].d;
    EADD(z0,yya,z,zz)
    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

    if (n) {
      /* -cot */
      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u20.d*c3))==c3+(cc3+u20.d*c3)) return (-sy*y);
    }
    else {
      /* tan */
      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u19.d*c3))==c3+(cc3+u19.d*c3)) return (sy*y);
    }
    return tanMp(x);
  }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
  n = (__branred(x,&a,&da)) & 0x00000001;
  EADD(a,da,t1,t2)   a=t1;  da=t2;
  if (a<ZERO) {ya=-a;  yya=-da;  sy=MONE;}
  else        {ya= a;  yya= da;  sy= ONE;}
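
  /* For these huge arguments the reduction is done by __branred()
     (branred.c), which subtracts the nearest multiple of pi/2 using a
     stored high precision value of 2/pi and returns that multiple; its
     parity again selects between tan and -cot, and the reduced argument
     comes back as the pair a+da.  */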

  /* (+++) The case 1e8 < abs(x) < 2**1024,    abs(y) <= 1e-7 */
  if (ya<=gy1.d) return tanMp(x);

  /* (X) The case 1e8 < abs(x) < 2**1024,    1e-7 < abs(y) <= 0.0608 */
  if (ya<=gy2.d) {
    a2 = a*a;
    t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
    if (n) {
      /* First stage -cot */
      EADD(a,t2,b,db)
      DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c+(dc-u22.d*c))==c+(dc+u22.d*c)) return (-y);
    }
    else {
      /* First stage tan */
      if ((y=a+(t2-u21.d*a))==a+(t2+u21.d*a)) return y;
    }

    /* Second stage */
    /* Reduction by algorithm iv */
    p=10;    n = (__mpranred(x,&mpa,p)) & 0x00000001;
    __mp_dbl(&mpa,&a,p);        __dbl_mp(a,&mpt1,p);
    __sub(&mpa,&mpt1,&mpt2,p);  __mp_dbl(&mpt2,&da,p);

    MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
         x2*a27.d))))));
    ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

    if (n) {
      /* Second stage -cot */
      DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c2+(cc2-u24.d*c2)) == c2+(cc2+u24.d*c2)) return (-y);
    }
    else {
      /* Second stage tan */
      if ((y=c1+(cc1-u23.d*c1)) == c1+(cc1+u23.d*c1)) return y;
    }
    return tanMp(x);
  }

  /* (XI) The case 1e8 < abs(x) < 2**1024,    0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d+TWO8*ya));
  z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
  pz = z+z*z2*(e0.d+z2*e1.d);
  fi = xfg[i][1].d;  gi = xfg[i][2].d;

  if (n) {
    /* -cot */
    t2 = pz*(fi+gi)/(fi+pz);
    if ((y=gi-(t2-gi*u26.d))==gi-(t2+gi*u26.d)) return (-sy*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    if ((y=gi-(t2-(t4=gi*ua26.d+t3*ub26.d)))==gi-(t2+t4)) return (-sy*y);
  }
  else {
    /* tan */
    t2 = pz*(gi+fi)/(gi-pz);
    if ((y=fi+(t2-fi*u25.d))==fi+(t2+fi*u25.d)) return (sy*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    if ((y=fi+(t2-(t4=fi*ua25.d+t3*ub25.d)))==fi+(t2+t4)) return (sy*y);
  }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD(z0,yya,z,zz)
  MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
  c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
  ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
  ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
  MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
  ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

  ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
  MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
  SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

  if (n) {
    /* -cot */
    DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
    if ((y=c3+(cc3-u28.d*c3))==c3+(cc3+u28.d*c3)) return (-sy*y);
  }
  else {
    /* tan */
    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
    if ((y=c3+(cc3-u27.d*c3))==c3+(cc3+u27.d*c3)) return (sy*y);
  }
  return tanMp(x);
}

/* Multiple precision stage.  Convert x to a multiple precision number,  */
/* compute tan(x) by the mptan() routine, and convert the result back    */
/* to double.                                                             */
static double tanMp(double x)
{
  int p;
  double y;
  mp_no mpy;
  p=32;
  __mptan(x, &mpy, p);
  __mp_dbl(&mpy,&y,p);
  return y;
}

#ifdef NO_LONG_DOUBLE
weak_alias (tan, tanl)
#endif
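
/* A minimal usage sketch (hypothetical caller, not part of this file),
   showing the errno behaviour of the x=+-Inf branch near the top of tan():

     #include <errno.h>
     #include <math.h>

     errno = 0;
     double t = tan (1.0);        (correctly rounded tan(1.0))
     double u = tan (INFINITY);   (a NaN; errno is now EDOM)
*/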