/* Replace FSF snail mail address with URLs.
   glibc.git: sysdeps/ieee754/dbl-64/s_tan.c
   blob 962a4eba6bc5b3c97fa4d6ed9041873f751763c0 */
/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001, 2009, 2011 Free Software Foundation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/*  MODULE_NAME: utan.c                                              */
/*                                                                   */
/*  FUNCTIONS: utan                                                  */
/*             tanMp                                                 */
/*                                                                   */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h               */
/*                branred.c sincos32.c mptan.c                       */
/*                utan.tbl                                           */
/*                                                                   */
/*  An ultimate tan routine. Given an IEEE double machine number x   */
/*  it computes the correctly rounded (to nearest) value of tan(x).  */
/*  Assumption: Machine arithmetic operations are performed in       */
/*  round to nearest mode of IEEE 754 standard.                      */
/*                                                                   */
/*********************************************************************/
36 #include <errno.h>
37 #include "endian.h"
38 #include <dla.h>
39 #include "mpa.h"
40 #include "MathLib.h"
41 #include "math.h"
43 #ifndef SECTION
44 # define SECTION
45 #endif
47 static double tanMp(double);
48 void __mptan(double, mp_no *, int);
50 double
51 SECTION
52 tan(double x) {
53 #include "utan.h"
54 #include "utan.tbl"
56 int ux,i,n;
57 double a,da,a2,b,db,c,dc,c1,cc1,c2,cc2,c3,cc3,fi,ffi,gi,pz,s,sy,
58 t,t1,t2,t3,t4,t7,t8,t9,t10,w,x2,xn,xx2,y,ya,yya,z0,z,zz,z2,zz2;
59 #ifndef DLA_FMS
60 double t5,t6;
61 #endif
62 int p;
63 number num,v;
64 mp_no mpa,mpt1,mpt2;
65 #if 0
66 mp_no mpy;
67 #endif
69 int __branred(double, double *, double *);
70 int __mpranred(double, mp_no *, int);
72 /* x=+-INF, x=NaN */
73 num.d = x; ux = num.i[HIGH_HALF];
74 if ((ux&0x7ff00000)==0x7ff00000) {
75 if ((ux&0x7fffffff)==0x7ff00000)
76 __set_errno (EDOM);
77 return x-x;
80 w=(x<ZERO) ? -x : x;
82 /* (I) The case abs(x) <= 1.259e-8 */
83 if (w<=g1.d) return x;
85 /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
86 if (w<=g2.d) {
88 /* First stage */
89 x2 = x*x;
90 t2 = x*x2*(d3.d+x2*(d5.d+x2*(d7.d+x2*(d9.d+x2*d11.d))));
91 if ((y=x+(t2-u1.d*t2)) == x+(t2+u1.d*t2)) return y;
93 /* Second stage */
94 c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
95 x2*a27.d))))));
96 EMULV(x,x,x2,xx2,t1,t2,t3,t4,t5)
97 ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
98 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
99 ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
100 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
101 ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
102 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
103 ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
104 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
105 ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
106 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
107 ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
108 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
109 MUL2(x ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
110 ADD2(x ,zero.d,c2,cc2,c1,cc1,t1,t2)
111 if ((y=c1+(cc1-u2.d*c1)) == c1+(cc1+u2.d*c1)) return y;
112 return tanMp(x);
115 /* (III) The case 0.0608 < abs(x) <= 0.787 */
116 if (w<=g3.d) {
118 /* First stage */
119 i = ((int) (mfftnhf.d+TWO8*w));
120 z = w-xfg[i][0].d; z2 = z*z; s = (x<ZERO) ? MONE : ONE;
121 pz = z+z*z2*(e0.d+z2*e1.d);
122 fi = xfg[i][1].d; gi = xfg[i][2].d; t2 = pz*(gi+fi)/(gi-pz);
123 if ((y=fi+(t2-fi*u3.d))==fi+(t2+fi*u3.d)) return (s*y);
124 t3 = (t2<ZERO) ? -t2 : t2;
125 t4 = fi*ua3.d+t3*ub3.d;
126 if ((y=fi+(t2-t4))==fi+(t2+t4)) return (s*y);
128 /* Second stage */
129 ffi = xfg[i][3].d;
130 c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
131 EMULV(z,z,z2,zz2,t1,t2,t3,t4,t5)
132 ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
133 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
134 ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
135 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
136 MUL2(z ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
137 ADD2(z ,zero.d,c2,cc2,c1,cc1,t1,t2)
139 ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
140 MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
141 SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
142 DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
144 if ((y=c3+(cc3-u4.d*c3))==c3+(cc3+u4.d*c3)) return (s*y);
145 return tanMp(x);
148 /* (---) The case 0.787 < abs(x) <= 25 */
149 if (w<=g4.d) {
150 /* Range reduction by algorithm i */
151 t = (x*hpinv.d + toint.d);
152 xn = t - toint.d;
153 v.d = t;
154 t1 = (x - xn*mp1.d) - xn*mp2.d;
155 n =v.i[LOW_HALF] & 0x00000001;
156 da = xn*mp3.d;
157 a=t1-da;
158 da = (t1-a)-da;
159 if (a<ZERO) {ya=-a; yya=-da; sy=MONE;}
160 else {ya= a; yya= da; sy= ONE;}
162 /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
163 if (ya<=gy1.d) return tanMp(x);
165 /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
166 if (ya<=gy2.d) {
167 a2 = a*a;
168 t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
169 if (n) {
170 /* First stage -cot */
171 EADD(a,t2,b,db)
172 DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
173 if ((y=c+(dc-u6.d*c))==c+(dc+u6.d*c)) return (-y); }
174 else {
175 /* First stage tan */
176 if ((y=a+(t2-u5.d*a))==a+(t2+u5.d*a)) return y; }
177 /* Second stage */
178 /* Range reduction by algorithm ii */
179 t = (x*hpinv.d + toint.d);
180 xn = t - toint.d;
181 v.d = t;
182 t1 = (x - xn*mp1.d) - xn*mp2.d;
183 n =v.i[LOW_HALF] & 0x00000001;
184 da = xn*pp3.d;
185 t=t1-da;
186 da = (t1-t)-da;
187 t1 = xn*pp4.d;
188 a = t - t1;
189 da = ((t-a)-t1)+da;
191 /* Second stage */
192 EADD(a,da,t1,t2) a=t1; da=t2;
193 MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
194 c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
195 x2*a27.d))))));
196 ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
197 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
198 ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
199 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
200 ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
201 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
202 ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
203 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
204 ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
205 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
206 ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
207 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
208 MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
209 ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
211 if (n) {
212 /* Second stage -cot */
213 DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
214 if ((y=c2+(cc2-u8.d*c2)) == c2+(cc2+u8.d*c2)) return (-y); }
215 else {
216 /* Second stage tan */
217 if ((y=c1+(cc1-u7.d*c1)) == c1+(cc1+u7.d*c1)) return y; }
218 return tanMp(x);
221 /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */
223 /* First stage */
224 i = ((int) (mfftnhf.d+TWO8*ya));
225 z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
226 pz = z+z*z2*(e0.d+z2*e1.d);
227 fi = xfg[i][1].d; gi = xfg[i][2].d;
229 if (n) {
230 /* -cot */
231 t2 = pz*(fi+gi)/(fi+pz);
232 if ((y=gi-(t2-gi*u10.d))==gi-(t2+gi*u10.d)) return (-sy*y);
233 t3 = (t2<ZERO) ? -t2 : t2;
234 t4 = gi*ua10.d+t3*ub10.d;
235 if ((y=gi-(t2-t4))==gi-(t2+t4)) return (-sy*y); }
236 else {
237 /* tan */
238 t2 = pz*(gi+fi)/(gi-pz);
239 if ((y=fi+(t2-fi*u9.d))==fi+(t2+fi*u9.d)) return (sy*y);
240 t3 = (t2<ZERO) ? -t2 : t2;
241 t4 = fi*ua9.d+t3*ub9.d;
242 if ((y=fi+(t2-t4))==fi+(t2+t4)) return (sy*y); }
244 /* Second stage */
245 ffi = xfg[i][3].d;
246 EADD(z0,yya,z,zz)
247 MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
248 c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
249 ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
250 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
251 ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
252 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
253 MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
254 ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
256 ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
257 MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
258 SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
260 if (n) {
261 /* -cot */
262 DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
263 if ((y=c3+(cc3-u12.d*c3))==c3+(cc3+u12.d*c3)) return (-sy*y); }
264 else {
265 /* tan */
266 DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
267 if ((y=c3+(cc3-u11.d*c3))==c3+(cc3+u11.d*c3)) return (sy*y); }
269 return tanMp(x);
272 /* (---) The case 25 < abs(x) <= 1e8 */
273 if (w<=g5.d) {
274 /* Range reduction by algorithm ii */
275 t = (x*hpinv.d + toint.d);
276 xn = t - toint.d;
277 v.d = t;
278 t1 = (x - xn*mp1.d) - xn*mp2.d;
279 n =v.i[LOW_HALF] & 0x00000001;
280 da = xn*pp3.d;
281 t=t1-da;
282 da = (t1-t)-da;
283 t1 = xn*pp4.d;
284 a = t - t1;
285 da = ((t-a)-t1)+da;
286 EADD(a,da,t1,t2) a=t1; da=t2;
287 if (a<ZERO) {ya=-a; yya=-da; sy=MONE;}
288 else {ya= a; yya= da; sy= ONE;}
290 /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
291 if (ya<=gy1.d) return tanMp(x);
293 /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
294 if (ya<=gy2.d) {
295 a2 = a*a;
296 t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
297 if (n) {
298 /* First stage -cot */
299 EADD(a,t2,b,db)
300 DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
301 if ((y=c+(dc-u14.d*c))==c+(dc+u14.d*c)) return (-y); }
302 else {
303 /* First stage tan */
304 if ((y=a+(t2-u13.d*a))==a+(t2+u13.d*a)) return y; }
306 /* Second stage */
307 MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
308 c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
309 x2*a27.d))))));
310 ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
311 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
312 ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
313 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
314 ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
315 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
316 ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
317 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
318 ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
319 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
320 ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
321 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
322 MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
323 ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
325 if (n) {
326 /* Second stage -cot */
327 DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
328 if ((y=c2+(cc2-u16.d*c2)) == c2+(cc2+u16.d*c2)) return (-y); }
329 else {
330 /* Second stage tan */
331 if ((y=c1+(cc1-u15.d*c1)) == c1+(cc1+u15.d*c1)) return (y); }
332 return tanMp(x);
335 /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
336 /* First stage */
337 i = ((int) (mfftnhf.d+TWO8*ya));
338 z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
339 pz = z+z*z2*(e0.d+z2*e1.d);
340 fi = xfg[i][1].d; gi = xfg[i][2].d;
342 if (n) {
343 /* -cot */
344 t2 = pz*(fi+gi)/(fi+pz);
345 if ((y=gi-(t2-gi*u18.d))==gi-(t2+gi*u18.d)) return (-sy*y);
346 t3 = (t2<ZERO) ? -t2 : t2;
347 t4 = gi*ua18.d+t3*ub18.d;
348 if ((y=gi-(t2-t4))==gi-(t2+t4)) return (-sy*y); }
349 else {
350 /* tan */
351 t2 = pz*(gi+fi)/(gi-pz);
352 if ((y=fi+(t2-fi*u17.d))==fi+(t2+fi*u17.d)) return (sy*y);
353 t3 = (t2<ZERO) ? -t2 : t2;
354 t4 = fi*ua17.d+t3*ub17.d;
355 if ((y=fi+(t2-t4))==fi+(t2+t4)) return (sy*y); }
357 /* Second stage */
358 ffi = xfg[i][3].d;
359 EADD(z0,yya,z,zz)
360 MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
361 c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
362 ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
363 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
364 ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
365 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
366 MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
367 ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
369 ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
370 MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
371 SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
373 if (n) {
374 /* -cot */
375 DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
376 if ((y=c3+(cc3-u20.d*c3))==c3+(cc3+u20.d*c3)) return (-sy*y); }
377 else {
378 /* tan */
379 DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
380 if ((y=c3+(cc3-u19.d*c3))==c3+(cc3+u19.d*c3)) return (sy*y); }
381 return tanMp(x);
384 /* (---) The case 1e8 < abs(x) < 2**1024 */
385 /* Range reduction by algorithm iii */
386 n = (__branred(x,&a,&da)) & 0x00000001;
387 EADD(a,da,t1,t2) a=t1; da=t2;
388 if (a<ZERO) {ya=-a; yya=-da; sy=MONE;}
389 else {ya= a; yya= da; sy= ONE;}
391 /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
392 if (ya<=gy1.d) return tanMp(x);
394 /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
395 if (ya<=gy2.d) {
396 a2 = a*a;
397 t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
398 if (n) {
399 /* First stage -cot */
400 EADD(a,t2,b,db)
401 DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
402 if ((y=c+(dc-u22.d*c))==c+(dc+u22.d*c)) return (-y); }
403 else {
404 /* First stage tan */
405 if ((y=a+(t2-u21.d*a))==a+(t2+u21.d*a)) return y; }
407 /* Second stage */
408 /* Reduction by algorithm iv */
409 p=10; n = (__mpranred(x,&mpa,p)) & 0x00000001;
410 __mp_dbl(&mpa,&a,p); __dbl_mp(a,&mpt1,p);
411 __sub(&mpa,&mpt1,&mpt2,p); __mp_dbl(&mpt2,&da,p);
413 MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
414 c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
415 x2*a27.d))))));
416 ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
417 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
418 ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
419 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
420 ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
421 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
422 ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
423 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
424 ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
425 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
426 ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
427 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
428 MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
429 ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
431 if (n) {
432 /* Second stage -cot */
433 DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
434 if ((y=c2+(cc2-u24.d*c2)) == c2+(cc2+u24.d*c2)) return (-y); }
435 else {
436 /* Second stage tan */
437 if ((y=c1+(cc1-u23.d*c1)) == c1+(cc1+u23.d*c1)) return y; }
438 return tanMp(x);
441 /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
442 /* First stage */
443 i = ((int) (mfftnhf.d+TWO8*ya));
444 z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
445 pz = z+z*z2*(e0.d+z2*e1.d);
446 fi = xfg[i][1].d; gi = xfg[i][2].d;
448 if (n) {
449 /* -cot */
450 t2 = pz*(fi+gi)/(fi+pz);
451 if ((y=gi-(t2-gi*u26.d))==gi-(t2+gi*u26.d)) return (-sy*y);
452 t3 = (t2<ZERO) ? -t2 : t2;
453 t4 = gi*ua26.d+t3*ub26.d;
454 if ((y=gi-(t2-t4))==gi-(t2+t4)) return (-sy*y); }
455 else {
456 /* tan */
457 t2 = pz*(gi+fi)/(gi-pz);
458 if ((y=fi+(t2-fi*u25.d))==fi+(t2+fi*u25.d)) return (sy*y);
459 t3 = (t2<ZERO) ? -t2 : t2;
460 t4 = fi*ua25.d+t3*ub25.d;
461 if ((y=fi+(t2-t4))==fi+(t2+t4)) return (sy*y); }
463 /* Second stage */
464 ffi = xfg[i][3].d;
465 EADD(z0,yya,z,zz)
466 MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
467 c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
468 ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
469 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
470 ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
471 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
472 MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
473 ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
475 ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
476 MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
477 SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
479 if (n) {
480 /* -cot */
481 DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
482 if ((y=c3+(cc3-u28.d*c3))==c3+(cc3+u28.d*c3)) return (-sy*y); }
483 else {
484 /* tan */
485 DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
486 if ((y=c3+(cc3-u27.d*c3))==c3+(cc3+u27.d*c3)) return (sy*y); }
487 return tanMp(x);
491 /* multiple precision stage */
492 /* Convert x to multi precision number,compute tan(x) by mptan() routine */
493 /* and converts result back to double */
494 static double
495 SECTION
496 tanMp(double x)
498 int p;
499 double y;
500 mp_no mpy;
501 p=32;
502 __mptan(x, &mpy, p);
503 __mp_dbl(&mpy,&y,p);
504 return y;
/* On platforms where long double is the same as double, provide tanl
   as a weak alias of tan.  */
#ifdef NO_LONG_DOUBLE
weak_alias (tan, tanl)
#endif