/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001, 2009 Free Software Foundation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*********************************************************************/
/*  MODULE_NAME: utan.c                                              */
/*                                                                   */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h               */
/*                branred.c sincos32.c mptan.c                       */
/*                                                                   */
/*  An ultimate tan routine. Given an IEEE double machine number x   */
/*  it computes the correctly rounded (to nearest) value of tan(x).  */
/*  Assumption: Machine arithmetic operations are performed in       */
/*  round to nearest mode of IEEE 754 standard.                      */
/*********************************************************************/
44 static double tanMp(double);
45 void __mptan(double, mp_no
*, int);
47 double tan(double x
) {
52 double a
,da
,a2
,b
,db
,c
,dc
,c1
,cc1
,c2
,cc2
,c3
,cc3
,fi
,ffi
,gi
,pz
,s
,sy
,
53 t
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
,w
,x2
,xn
,xx2
,y
,ya
,yya
,z0
,z
,zz
,z2
,zz2
;
61 int __branred(double, double *, double *);
62 int __mpranred(double, mp_no
*, int);
65 num
.d
= x
; ux
= num
.i
[HIGH_HALF
];
66 if ((ux
&0x7ff00000)==0x7ff00000) {
67 if ((ux
&0x7fffffff)==0x7ff00000)
74 /* (I) The case abs(x) <= 1.259e-8 */
75 if (w
<=g1
.d
) return x
;
77 /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
82 t2
= x
*x2
*(d3
.d
+x2
*(d5
.d
+x2
*(d7
.d
+x2
*(d9
.d
+x2
*d11
.d
))));
83 if ((y
=x
+(t2
-u1
.d
*t2
)) == x
+(t2
+u1
.d
*t2
)) return y
;
86 c1
= x2
*(a15
.d
+x2
*(a17
.d
+x2
*(a19
.d
+x2
*(a21
.d
+x2
*(a23
.d
+x2
*(a25
.d
+
88 EMULV(x
,x
,x2
,xx2
,t1
,t2
,t3
,t4
,t5
)
89 ADD2(a13
.d
,aa13
.d
,c1
,zero
.d
,c2
,cc2
,t1
,t2
)
90 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
91 ADD2(a11
.d
,aa11
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
92 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
93 ADD2(a9
.d
,aa9
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
94 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
95 ADD2(a7
.d
,aa7
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
96 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
97 ADD2(a5
.d
,aa5
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
98 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
99 ADD2(a3
.d
,aa3
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
100 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
101 MUL2(x
,zero
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
102 ADD2(x
,zero
.d
,c2
,cc2
,c1
,cc1
,t1
,t2
)
103 if ((y
=c1
+(cc1
-u2
.d
*c1
)) == c1
+(cc1
+u2
.d
*c1
)) return y
;
107 /* (III) The case 0.0608 < abs(x) <= 0.787 */
111 i
= ((int) (mfftnhf
.d
+TWO8
*w
));
112 z
= w
-xfg
[i
][0].d
; z2
= z
*z
; s
= (x
<ZERO
) ? MONE
: ONE
;
113 pz
= z
+z
*z2
*(e0
.d
+z2
*e1
.d
);
114 fi
= xfg
[i
][1].d
; gi
= xfg
[i
][2].d
; t2
= pz
*(gi
+fi
)/(gi
-pz
);
115 if ((y
=fi
+(t2
-fi
*u3
.d
))==fi
+(t2
+fi
*u3
.d
)) return (s
*y
);
116 t3
= (t2
<ZERO
) ? -t2
: t2
;
117 if ((y
=fi
+(t2
-(t4
=fi
*ua3
.d
+t3
*ub3
.d
)))==fi
+(t2
+t4
)) return (s
*y
);
121 c1
= z2
*(a7
.d
+z2
*(a9
.d
+z2
*a11
.d
));
122 EMULV(z
,z
,z2
,zz2
,t1
,t2
,t3
,t4
,t5
)
123 ADD2(a5
.d
,aa5
.d
,c1
,zero
.d
,c2
,cc2
,t1
,t2
)
124 MUL2(z2
,zz2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
125 ADD2(a3
.d
,aa3
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
126 MUL2(z2
,zz2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
127 MUL2(z
,zero
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
128 ADD2(z
,zero
.d
,c2
,cc2
,c1
,cc1
,t1
,t2
)
130 ADD2(fi
,ffi
,c1
,cc1
,c2
,cc2
,t1
,t2
)
131 MUL2(fi
,ffi
,c1
,cc1
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
132 SUB2(one
.d
,zero
.d
,c3
,cc3
,c1
,cc1
,t1
,t2
)
133 DIV2(c2
,cc2
,c1
,cc1
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
135 if ((y
=c3
+(cc3
-u4
.d
*c3
))==c3
+(cc3
+u4
.d
*c3
)) return (s
*y
);
139 /* (---) The case 0.787 < abs(x) <= 25 */
141 /* Range reduction by algorithm i */
142 t
= (x
*hpinv
.d
+ toint
.d
);
145 t1
= (x
- xn
*mp1
.d
) - xn
*mp2
.d
;
146 n
=v
.i
[LOW_HALF
] & 0x00000001;
150 if (a
<ZERO
) {ya
=-a
; yya
=-da
; sy
=MONE
;}
151 else {ya
= a
; yya
= da
; sy
= ONE
;}
153 /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
154 if (ya
<=gy1
.d
) return tanMp(x
);
156 /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
159 t2
= da
+a
*a2
*(d3
.d
+a2
*(d5
.d
+a2
*(d7
.d
+a2
*(d9
.d
+a2
*d11
.d
))));
161 /* First stage -cot */
163 DIV2(one
.d
,zero
.d
,b
,db
,c
,dc
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
164 if ((y
=c
+(dc
-u6
.d
*c
))==c
+(dc
+u6
.d
*c
)) return (-y
); }
166 /* First stage tan */
167 if ((y
=a
+(t2
-u5
.d
*a
))==a
+(t2
+u5
.d
*a
)) return y
; }
169 /* Range reduction by algorithm ii */
170 t
= (x
*hpinv
.d
+ toint
.d
);
173 t1
= (x
- xn
*mp1
.d
) - xn
*mp2
.d
;
174 n
=v
.i
[LOW_HALF
] & 0x00000001;
183 EADD(a
,da
,t1
,t2
) a
=t1
; da
=t2
;
184 MUL2(a
,da
,a
,da
,x2
,xx2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
185 c1
= x2
*(a15
.d
+x2
*(a17
.d
+x2
*(a19
.d
+x2
*(a21
.d
+x2
*(a23
.d
+x2
*(a25
.d
+
187 ADD2(a13
.d
,aa13
.d
,c1
,zero
.d
,c2
,cc2
,t1
,t2
)
188 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
189 ADD2(a11
.d
,aa11
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
190 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
191 ADD2(a9
.d
,aa9
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
192 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
193 ADD2(a7
.d
,aa7
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
194 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
195 ADD2(a5
.d
,aa5
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
196 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
197 ADD2(a3
.d
,aa3
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
198 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
199 MUL2(a
,da
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
200 ADD2(a
,da
,c2
,cc2
,c1
,cc1
,t1
,t2
)
203 /* Second stage -cot */
204 DIV2(one
.d
,zero
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
205 if ((y
=c2
+(cc2
-u8
.d
*c2
)) == c2
+(cc2
+u8
.d
*c2
)) return (-y
); }
207 /* Second stage tan */
208 if ((y
=c1
+(cc1
-u7
.d
*c1
)) == c1
+(cc1
+u7
.d
*c1
)) return y
; }
212 /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */
215 i
= ((int) (mfftnhf
.d
+TWO8
*ya
));
216 z
= (z0
=(ya
-xfg
[i
][0].d
))+yya
; z2
= z
*z
;
217 pz
= z
+z
*z2
*(e0
.d
+z2
*e1
.d
);
218 fi
= xfg
[i
][1].d
; gi
= xfg
[i
][2].d
;
222 t2
= pz
*(fi
+gi
)/(fi
+pz
);
223 if ((y
=gi
-(t2
-gi
*u10
.d
))==gi
-(t2
+gi
*u10
.d
)) return (-sy
*y
);
224 t3
= (t2
<ZERO
) ? -t2
: t2
;
225 if ((y
=gi
-(t2
-(t4
=gi
*ua10
.d
+t3
*ub10
.d
)))==gi
-(t2
+t4
)) return (-sy
*y
); }
228 t2
= pz
*(gi
+fi
)/(gi
-pz
);
229 if ((y
=fi
+(t2
-fi
*u9
.d
))==fi
+(t2
+fi
*u9
.d
)) return (sy
*y
);
230 t3
= (t2
<ZERO
) ? -t2
: t2
;
231 if ((y
=fi
+(t2
-(t4
=fi
*ua9
.d
+t3
*ub9
.d
)))==fi
+(t2
+t4
)) return (sy
*y
); }
236 MUL2(z
,zz
,z
,zz
,z2
,zz2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
237 c1
= z2
*(a7
.d
+z2
*(a9
.d
+z2
*a11
.d
));
238 ADD2(a5
.d
,aa5
.d
,c1
,zero
.d
,c2
,cc2
,t1
,t2
)
239 MUL2(z2
,zz2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
240 ADD2(a3
.d
,aa3
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
241 MUL2(z2
,zz2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
242 MUL2(z
,zz
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
243 ADD2(z
,zz
,c2
,cc2
,c1
,cc1
,t1
,t2
)
245 ADD2(fi
,ffi
,c1
,cc1
,c2
,cc2
,t1
,t2
)
246 MUL2(fi
,ffi
,c1
,cc1
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
247 SUB2(one
.d
,zero
.d
,c3
,cc3
,c1
,cc1
,t1
,t2
)
251 DIV2(c1
,cc1
,c2
,cc2
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
252 if ((y
=c3
+(cc3
-u12
.d
*c3
))==c3
+(cc3
+u12
.d
*c3
)) return (-sy
*y
); }
255 DIV2(c2
,cc2
,c1
,cc1
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
256 if ((y
=c3
+(cc3
-u11
.d
*c3
))==c3
+(cc3
+u11
.d
*c3
)) return (sy
*y
); }
261 /* (---) The case 25 < abs(x) <= 1e8 */
263 /* Range reduction by algorithm ii */
264 t
= (x
*hpinv
.d
+ toint
.d
);
267 t1
= (x
- xn
*mp1
.d
) - xn
*mp2
.d
;
268 n
=v
.i
[LOW_HALF
] & 0x00000001;
275 EADD(a
,da
,t1
,t2
) a
=t1
; da
=t2
;
276 if (a
<ZERO
) {ya
=-a
; yya
=-da
; sy
=MONE
;}
277 else {ya
= a
; yya
= da
; sy
= ONE
;}
279 /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
280 if (ya
<=gy1
.d
) return tanMp(x
);
282 /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
285 t2
= da
+a
*a2
*(d3
.d
+a2
*(d5
.d
+a2
*(d7
.d
+a2
*(d9
.d
+a2
*d11
.d
))));
287 /* First stage -cot */
289 DIV2(one
.d
,zero
.d
,b
,db
,c
,dc
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
290 if ((y
=c
+(dc
-u14
.d
*c
))==c
+(dc
+u14
.d
*c
)) return (-y
); }
292 /* First stage tan */
293 if ((y
=a
+(t2
-u13
.d
*a
))==a
+(t2
+u13
.d
*a
)) return y
; }
296 MUL2(a
,da
,a
,da
,x2
,xx2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
297 c1
= x2
*(a15
.d
+x2
*(a17
.d
+x2
*(a19
.d
+x2
*(a21
.d
+x2
*(a23
.d
+x2
*(a25
.d
+
299 ADD2(a13
.d
,aa13
.d
,c1
,zero
.d
,c2
,cc2
,t1
,t2
)
300 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
301 ADD2(a11
.d
,aa11
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
302 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
303 ADD2(a9
.d
,aa9
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
304 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
305 ADD2(a7
.d
,aa7
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
306 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
307 ADD2(a5
.d
,aa5
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
308 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
309 ADD2(a3
.d
,aa3
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
310 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
311 MUL2(a
,da
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
312 ADD2(a
,da
,c2
,cc2
,c1
,cc1
,t1
,t2
)
315 /* Second stage -cot */
316 DIV2(one
.d
,zero
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
317 if ((y
=c2
+(cc2
-u16
.d
*c2
)) == c2
+(cc2
+u16
.d
*c2
)) return (-y
); }
319 /* Second stage tan */
320 if ((y
=c1
+(cc1
-u15
.d
*c1
)) == c1
+(cc1
+u15
.d
*c1
)) return (y
); }
324 /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
326 i
= ((int) (mfftnhf
.d
+TWO8
*ya
));
327 z
= (z0
=(ya
-xfg
[i
][0].d
))+yya
; z2
= z
*z
;
328 pz
= z
+z
*z2
*(e0
.d
+z2
*e1
.d
);
329 fi
= xfg
[i
][1].d
; gi
= xfg
[i
][2].d
;
333 t2
= pz
*(fi
+gi
)/(fi
+pz
);
334 if ((y
=gi
-(t2
-gi
*u18
.d
))==gi
-(t2
+gi
*u18
.d
)) return (-sy
*y
);
335 t3
= (t2
<ZERO
) ? -t2
: t2
;
336 if ((y
=gi
-(t2
-(t4
=gi
*ua18
.d
+t3
*ub18
.d
)))==gi
-(t2
+t4
)) return (-sy
*y
); }
339 t2
= pz
*(gi
+fi
)/(gi
-pz
);
340 if ((y
=fi
+(t2
-fi
*u17
.d
))==fi
+(t2
+fi
*u17
.d
)) return (sy
*y
);
341 t3
= (t2
<ZERO
) ? -t2
: t2
;
342 if ((y
=fi
+(t2
-(t4
=fi
*ua17
.d
+t3
*ub17
.d
)))==fi
+(t2
+t4
)) return (sy
*y
); }
347 MUL2(z
,zz
,z
,zz
,z2
,zz2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
348 c1
= z2
*(a7
.d
+z2
*(a9
.d
+z2
*a11
.d
));
349 ADD2(a5
.d
,aa5
.d
,c1
,zero
.d
,c2
,cc2
,t1
,t2
)
350 MUL2(z2
,zz2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
351 ADD2(a3
.d
,aa3
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
352 MUL2(z2
,zz2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
353 MUL2(z
,zz
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
354 ADD2(z
,zz
,c2
,cc2
,c1
,cc1
,t1
,t2
)
356 ADD2(fi
,ffi
,c1
,cc1
,c2
,cc2
,t1
,t2
)
357 MUL2(fi
,ffi
,c1
,cc1
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
358 SUB2(one
.d
,zero
.d
,c3
,cc3
,c1
,cc1
,t1
,t2
)
362 DIV2(c1
,cc1
,c2
,cc2
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
363 if ((y
=c3
+(cc3
-u20
.d
*c3
))==c3
+(cc3
+u20
.d
*c3
)) return (-sy
*y
); }
366 DIV2(c2
,cc2
,c1
,cc1
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
367 if ((y
=c3
+(cc3
-u19
.d
*c3
))==c3
+(cc3
+u19
.d
*c3
)) return (sy
*y
); }
371 /* (---) The case 1e8 < abs(x) < 2**1024 */
372 /* Range reduction by algorithm iii */
373 n
= (__branred(x
,&a
,&da
)) & 0x00000001;
374 EADD(a
,da
,t1
,t2
) a
=t1
; da
=t2
;
375 if (a
<ZERO
) {ya
=-a
; yya
=-da
; sy
=MONE
;}
376 else {ya
= a
; yya
= da
; sy
= ONE
;}
378 /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
379 if (ya
<=gy1
.d
) return tanMp(x
);
381 /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
384 t2
= da
+a
*a2
*(d3
.d
+a2
*(d5
.d
+a2
*(d7
.d
+a2
*(d9
.d
+a2
*d11
.d
))));
386 /* First stage -cot */
388 DIV2(one
.d
,zero
.d
,b
,db
,c
,dc
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
389 if ((y
=c
+(dc
-u22
.d
*c
))==c
+(dc
+u22
.d
*c
)) return (-y
); }
391 /* First stage tan */
392 if ((y
=a
+(t2
-u21
.d
*a
))==a
+(t2
+u21
.d
*a
)) return y
; }
395 /* Reduction by algorithm iv */
396 p
=10; n
= (__mpranred(x
,&mpa
,p
)) & 0x00000001;
397 __mp_dbl(&mpa
,&a
,p
); __dbl_mp(a
,&mpt1
,p
);
398 __sub(&mpa
,&mpt1
,&mpt2
,p
); __mp_dbl(&mpt2
,&da
,p
);
400 MUL2(a
,da
,a
,da
,x2
,xx2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
401 c1
= x2
*(a15
.d
+x2
*(a17
.d
+x2
*(a19
.d
+x2
*(a21
.d
+x2
*(a23
.d
+x2
*(a25
.d
+
403 ADD2(a13
.d
,aa13
.d
,c1
,zero
.d
,c2
,cc2
,t1
,t2
)
404 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
405 ADD2(a11
.d
,aa11
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
406 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
407 ADD2(a9
.d
,aa9
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
408 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
409 ADD2(a7
.d
,aa7
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
410 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
411 ADD2(a5
.d
,aa5
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
412 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
413 ADD2(a3
.d
,aa3
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
414 MUL2(x2
,xx2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
415 MUL2(a
,da
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
416 ADD2(a
,da
,c2
,cc2
,c1
,cc1
,t1
,t2
)
419 /* Second stage -cot */
420 DIV2(one
.d
,zero
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
421 if ((y
=c2
+(cc2
-u24
.d
*c2
)) == c2
+(cc2
+u24
.d
*c2
)) return (-y
); }
423 /* Second stage tan */
424 if ((y
=c1
+(cc1
-u23
.d
*c1
)) == c1
+(cc1
+u23
.d
*c1
)) return y
; }
428 /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
430 i
= ((int) (mfftnhf
.d
+TWO8
*ya
));
431 z
= (z0
=(ya
-xfg
[i
][0].d
))+yya
; z2
= z
*z
;
432 pz
= z
+z
*z2
*(e0
.d
+z2
*e1
.d
);
433 fi
= xfg
[i
][1].d
; gi
= xfg
[i
][2].d
;
437 t2
= pz
*(fi
+gi
)/(fi
+pz
);
438 if ((y
=gi
-(t2
-gi
*u26
.d
))==gi
-(t2
+gi
*u26
.d
)) return (-sy
*y
);
439 t3
= (t2
<ZERO
) ? -t2
: t2
;
440 if ((y
=gi
-(t2
-(t4
=gi
*ua26
.d
+t3
*ub26
.d
)))==gi
-(t2
+t4
)) return (-sy
*y
); }
443 t2
= pz
*(gi
+fi
)/(gi
-pz
);
444 if ((y
=fi
+(t2
-fi
*u25
.d
))==fi
+(t2
+fi
*u25
.d
)) return (sy
*y
);
445 t3
= (t2
<ZERO
) ? -t2
: t2
;
446 if ((y
=fi
+(t2
-(t4
=fi
*ua25
.d
+t3
*ub25
.d
)))==fi
+(t2
+t4
)) return (sy
*y
); }
451 MUL2(z
,zz
,z
,zz
,z2
,zz2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
452 c1
= z2
*(a7
.d
+z2
*(a9
.d
+z2
*a11
.d
));
453 ADD2(a5
.d
,aa5
.d
,c1
,zero
.d
,c2
,cc2
,t1
,t2
)
454 MUL2(z2
,zz2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
455 ADD2(a3
.d
,aa3
.d
,c1
,cc1
,c2
,cc2
,t1
,t2
)
456 MUL2(z2
,zz2
,c2
,cc2
,c1
,cc1
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
457 MUL2(z
,zz
,c1
,cc1
,c2
,cc2
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
458 ADD2(z
,zz
,c2
,cc2
,c1
,cc1
,t1
,t2
)
460 ADD2(fi
,ffi
,c1
,cc1
,c2
,cc2
,t1
,t2
)
461 MUL2(fi
,ffi
,c1
,cc1
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
)
462 SUB2(one
.d
,zero
.d
,c3
,cc3
,c1
,cc1
,t1
,t2
)
466 DIV2(c1
,cc1
,c2
,cc2
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
467 if ((y
=c3
+(cc3
-u28
.d
*c3
))==c3
+(cc3
+u28
.d
*c3
)) return (-sy
*y
); }
470 DIV2(c2
,cc2
,c1
,cc1
,c3
,cc3
,t1
,t2
,t3
,t4
,t5
,t6
,t7
,t8
,t9
,t10
)
471 if ((y
=c3
+(cc3
-u27
.d
*c3
))==c3
+(cc3
+u27
.d
*c3
)) return (sy
*y
); }
476 /* multiple precision stage */
477 /* Convert x to multi precision number,compute tan(x) by mptan() routine */
478 /* and converts result back to double */
479 static double tanMp(double x
)
490 #ifdef NO_LONG_DOUBLE
491 weak_alias (tan
, tanl
)