Update.
[glibc.git] / sysdeps / ieee754 / dbl-64 / e_atan2.c
blobadf7a0d11bb3ac4304daef4cf77fccdbeb8df85c
/*
 * IBM Accurate Mathematical Library
 * Copyright (c) International Business Machines Corp., 2001
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/************************************************************************/
/*  MODULE_NAME: atnat2.c                                               */
/*                                                                      */
/*  FUNCTIONS: uatan2                                                   */
/*             atan2Mp                                                  */
/*             signArctan2                                              */
/*             normalized                                               */
/*                                                                      */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h atnat2.h                */
/*                mpatan.c mpatan2.c mpsqrt.c                           */
/*                uatan.tbl                                             */
/*                                                                      */
/*  An ultimate atan2() routine. Given two IEEE double machine numbers  */
/*  y, x it computes the correctly rounded (to nearest) value of        */
/*  atan2(y,x).                                                         */
/*                                                                      */
/*  Assumption: Machine arithmetic operations are performed in          */
/*  round to nearest mode of IEEE 754 standard.                         */
/*                                                                      */
/************************************************************************/
39 #include "dla.h"
40 #include "mpa.h"
41 #include "MathLib.h"
42 #include "uatan.tbl"
43 #include "atnat2.h"
44 /************************************************************************/
45 /* An ultimate atan2 routine. Given two IEEE double machine numbers y,x */
46 /* it computes the correctly rounded (to nearest) value of atan2(y,x). */
47 /* Assumption: Machine arithmetic operations are performed in */
48 /* round to nearest mode of IEEE 754 standard. */
49 /************************************************************************/
50 static double atan2Mp(double ,double ,const int[]);
51 static double signArctan2(double ,double);
52 static double normalized(double ,double,double ,double);
53 void __mpatan2(mp_no *,mp_no *,mp_no *,int);
55 double __ieee754_atan2(double y,double x) {
57 int i,de,ux,dx,uy,dy;
58 #if 0
59 int p;
60 #endif
61 static const int pr[MM]={6,8,10,20,32};
62 double ax,ay,u,du,u9,ua,v,vv,dv,t1,t2,t3,t4,t5,t6,t7,t8,
63 z,zz,cor,s1,ss1,s2,ss2;
64 #if 0
65 double z1,z2;
66 #endif
67 number num;
68 #if 0
69 mp_no mperr,mpt1,mpx,mpy,mpz,mpz1,mpz2;
70 #endif
72 static const int ep= 59768832, /* 57*16**5 */
73 em=-59768832; /* -57*16**5 */
75 /* x=NaN or y=NaN */
76 num.d = x; ux = num.i[HIGH_HALF]; dx = num.i[LOW_HALF];
77 if ((ux&0x7ff00000) ==0x7ff00000) {
78 if (((ux&0x000fffff)|dx)!=0x00000000) return x+x; }
79 num.d = y; uy = num.i[HIGH_HALF]; dy = num.i[LOW_HALF];
80 if ((uy&0x7ff00000) ==0x7ff00000) {
81 if (((uy&0x000fffff)|dy)!=0x00000000) return y+y; }
83 /* y=+-0 */
84 if (uy==0x00000000) {
85 if (dy==0x00000000) {
86 if ((ux&0x80000000)==0x00000000) return ZERO;
87 else return opi.d; } }
88 else if (uy==0x80000000) {
89 if (dy==0x00000000) {
90 if ((ux&0x80000000)==0x00000000) return MZERO;
91 else return mopi.d;} }
93 /* x=+-0 */
94 if (x==ZERO) {
95 if ((uy&0x80000000)==0x00000000) return hpi.d;
96 else return mhpi.d; }
98 /* x=+-INF */
99 if (ux==0x7ff00000) {
100 if (dx==0x00000000) {
101 if (uy==0x7ff00000) {
102 if (dy==0x00000000) return qpi.d; }
103 else if (uy==0xfff00000) {
104 if (dy==0x00000000) return mqpi.d; }
105 else {
106 if ((uy&0x80000000)==0x00000000) return ZERO;
107 else return MZERO; }
110 else if (ux==0xfff00000) {
111 if (dx==0x00000000) {
112 if (uy==0x7ff00000) {
113 if (dy==0x00000000) return tqpi.d; }
114 else if (uy==0xfff00000) {
115 if (dy==0x00000000) return mtqpi.d; }
116 else {
117 if ((uy&0x80000000)==0x00000000) return opi.d;
118 else return mopi.d; }
122 /* y=+-INF */
123 if (uy==0x7ff00000) {
124 if (dy==0x00000000) return hpi.d; }
125 else if (uy==0xfff00000) {
126 if (dy==0x00000000) return mhpi.d; }
128 /* either x/y or y/x is very close to zero */
129 ax = (x<ZERO) ? -x : x; ay = (y<ZERO) ? -y : y;
130 de = (uy & 0x7ff00000) - (ux & 0x7ff00000);
131 if (de>=ep) { return ((y>ZERO) ? hpi.d : mhpi.d); }
132 else if (de<=em) {
133 if (x>ZERO) {
134 if ((z=ay/ax)<TWOM1022) return normalized(ax,ay,y,z);
135 else return signArctan2(y,z); }
136 else { return ((y>ZERO) ? opi.d : mopi.d); } }
138 /* if either x or y is extremely close to zero, scale abs(x), abs(y). */
139 if (ax<twom500.d || ay<twom500.d) { ax*=two500.d; ay*=two500.d; }
141 /* x,y which are neither special nor extreme */
142 if (ay<ax) {
143 u=ay/ax;
144 EMULV(ax,u,v,vv,t1,t2,t3,t4,t5)
145 du=((ay-v)-vv)/ax; }
146 else {
147 u=ax/ay;
148 EMULV(ay,u,v,vv,t1,t2,t3,t4,t5)
149 du=((ax-v)-vv)/ay; }
151 if (x>ZERO) {
153 /* (i) x>0, abs(y)< abs(x): atan(ay/ax) */
154 if (ay<ax) {
155 if (u<inv16.d) {
156 v=u*u; zz=du+u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
157 if ((z=u+(zz-u1.d*u)) == u+(zz+u1.d*u)) return signArctan2(y,z);
159 MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
160 s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
161 ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
162 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
163 ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
164 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
165 ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
166 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
167 ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
168 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
169 MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
170 ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
171 if ((z=s1+(ss1-u5.d*s1)) == s1+(ss1+u5.d*s1)) return signArctan2(y,z);
172 return atan2Mp(x,y,pr);
174 else {
175 i=(TWO52+TWO8*u)-TWO52; i-=16;
176 t3=u-cij[i][0].d;
177 EADD(t3,du,v,dv)
178 t1=cij[i][1].d; t2=cij[i][2].d;
179 zz=v*t2+(dv*t2+v*v*(cij[i][3].d+v*(cij[i][4].d+
180 v*(cij[i][5].d+v* cij[i][6].d))));
181 if (i<112) {
182 if (i<48) u9=u91.d; /* u < 1/4 */
183 else u9=u92.d; } /* 1/4 <= u < 1/2 */
184 else {
185 if (i<176) u9=u93.d; /* 1/2 <= u < 3/4 */
186 else u9=u94.d; } /* 3/4 <= u <= 1 */
187 if ((z=t1+(zz-u9*t1)) == t1+(zz+u9*t1)) return signArctan2(y,z);
189 t1=u-hij[i][0].d;
190 EADD(t1,du,v,vv)
191 s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
192 v*(hij[i][14].d+v* hij[i][15].d))));
193 ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
194 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
195 ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
196 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
197 ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
198 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
199 ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
200 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
201 ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
202 if ((z=s2+(ss2-ub.d*s2)) == s2+(ss2+ub.d*s2)) return signArctan2(y,z);
203 return atan2Mp(x,y,pr);
207 /* (ii) x>0, abs(x)<=abs(y): pi/2-atan(ax/ay) */
208 else {
209 if (u<inv16.d) {
210 v=u*u;
211 zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
212 ESUB(hpi.d,u,t2,cor)
213 t3=((hpi1.d+cor)-du)-zz;
214 if ((z=t2+(t3-u2.d)) == t2+(t3+u2.d)) return signArctan2(y,z);
216 MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
217 s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
218 ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
219 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
220 ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
221 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
222 ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
223 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
224 ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
225 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
226 MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
227 ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
228 SUB2(hpi.d,hpi1.d,s1,ss1,s2,ss2,t1,t2)
229 if ((z=s2+(ss2-u6.d)) == s2+(ss2+u6.d)) return signArctan2(y,z);
230 return atan2Mp(x,y,pr);
232 else {
233 i=(TWO52+TWO8*u)-TWO52; i-=16;
234 v=(u-cij[i][0].d)+du;
235 zz=hpi1.d-v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
236 v*(cij[i][5].d+v* cij[i][6].d))));
237 t1=hpi.d-cij[i][1].d;
238 if (i<112) ua=ua1.d; /* w < 1/2 */
239 else ua=ua2.d; /* w >= 1/2 */
240 if ((z=t1+(zz-ua)) == t1+(zz+ua)) return signArctan2(y,z);
242 t1=u-hij[i][0].d;
243 EADD(t1,du,v,vv)
244 s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
245 v*(hij[i][14].d+v* hij[i][15].d))));
246 ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
247 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
248 ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
249 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
250 ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
251 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
252 ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
253 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
254 ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
255 SUB2(hpi.d,hpi1.d,s2,ss2,s1,ss1,t1,t2)
256 if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d)) return signArctan2(y,z);
257 return atan2Mp(x,y,pr);
261 else {
263 /* (iii) x<0, abs(x)< abs(y): pi/2+atan(ax/ay) */
264 if (ax<ay) {
265 if (u<inv16.d) {
266 v=u*u;
267 zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
268 EADD(hpi.d,u,t2,cor)
269 t3=((hpi1.d+cor)+du)+zz;
270 if ((z=t2+(t3-u3.d)) == t2+(t3+u3.d)) return signArctan2(y,z);
272 MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
273 s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
274 ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
275 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
276 ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
277 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
278 ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
279 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
280 ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
281 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
282 MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
283 ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
284 ADD2(hpi.d,hpi1.d,s1,ss1,s2,ss2,t1,t2)
285 if ((z=s2+(ss2-u7.d)) == s2+(ss2+u7.d)) return signArctan2(y,z);
286 return atan2Mp(x,y,pr);
288 else {
289 i=(TWO52+TWO8*u)-TWO52; i-=16;
290 v=(u-cij[i][0].d)+du;
291 zz=hpi1.d+v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
292 v*(cij[i][5].d+v* cij[i][6].d))));
293 t1=hpi.d+cij[i][1].d;
294 if (i<112) ua=ua1.d; /* w < 1/2 */
295 else ua=ua2.d; /* w >= 1/2 */
296 if ((z=t1+(zz-ua)) == t1+(zz+ua)) return signArctan2(y,z);
298 t1=u-hij[i][0].d;
299 EADD(t1,du,v,vv)
300 s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
301 v*(hij[i][14].d+v* hij[i][15].d))));
302 ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
303 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
304 ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
305 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
306 ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
307 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
308 ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
309 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
310 ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
311 ADD2(hpi.d,hpi1.d,s2,ss2,s1,ss1,t1,t2)
312 if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d)) return signArctan2(y,z);
313 return atan2Mp(x,y,pr);
317 /* (iv) x<0, abs(y)<=abs(x): pi-atan(ax/ay) */
318 else {
319 if (u<inv16.d) {
320 v=u*u;
321 zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
322 ESUB(opi.d,u,t2,cor)
323 t3=((opi1.d+cor)-du)-zz;
324 if ((z=t2+(t3-u4.d)) == t2+(t3+u4.d)) return signArctan2(y,z);
326 MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
327 s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
328 ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
329 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
330 ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
331 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
332 ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
333 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
334 ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
335 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
336 MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
337 ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
338 SUB2(opi.d,opi1.d,s1,ss1,s2,ss2,t1,t2)
339 if ((z=s2+(ss2-u8.d)) == s2+(ss2+u8.d)) return signArctan2(y,z);
340 return atan2Mp(x,y,pr);
342 else {
343 i=(TWO52+TWO8*u)-TWO52; i-=16;
344 v=(u-cij[i][0].d)+du;
345 zz=opi1.d-v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
346 v*(cij[i][5].d+v* cij[i][6].d))));
347 t1=opi.d-cij[i][1].d;
348 if (i<112) ua=ua1.d; /* w < 1/2 */
349 else ua=ua2.d; /* w >= 1/2 */
350 if ((z=t1+(zz-ua)) == t1+(zz+ua)) return signArctan2(y,z);
352 t1=u-hij[i][0].d;
353 EADD(t1,du,v,vv)
354 s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
355 v*(hij[i][14].d+v* hij[i][15].d))));
356 ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
357 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
358 ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
359 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
360 ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
361 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
362 ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
363 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
364 ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
365 SUB2(opi.d,opi1.d,s2,ss2,s1,ss1,t1,t2)
366 if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d)) return signArctan2(y,z);
367 return atan2Mp(x,y,pr);
372 /* Treat the Denormalized case */
373 static double normalized(double ax,double ay,double y, double z)
374 { int p;
375 mp_no mpx,mpy,mpz,mperr,mpz2,mpt1;
376 p=6;
377 __dbl_mp(ax,&mpx,p); __dbl_mp(ay,&mpy,p); __dvd(&mpy,&mpx,&mpz,p);
378 __dbl_mp(ue.d,&mpt1,p); __mul(&mpz,&mpt1,&mperr,p);
379 __sub(&mpz,&mperr,&mpz2,p); __mp_dbl(&mpz2,&z,p);
380 return signArctan2(y,z);
/* Fix the sign and return after stage 1 or stage 2: attach the sign
   of y to the computed magnitude z.  Note y==-0.0 compares equal to
   0.0, so -0.0 takes the positive branch, as the original did.  */
static double signArctan2(double y,double z)
{
  return ((y<0.0) ? -z : z);
}
387 /* Stage 3: Perform a multi-Precision computation */
388 static double atan2Mp(double x,double y,const int pr[])
390 double z1,z2;
391 int i,p;
392 mp_no mpx,mpy,mpz,mpz1,mpz2,mperr,mpt1;
393 for (i=0; i<MM; i++) {
394 p = pr[i];
395 __dbl_mp(x,&mpx,p); __dbl_mp(y,&mpy,p);
396 __mpatan2(&mpy,&mpx,&mpz,p);
397 __dbl_mp(ud[i].d,&mpt1,p); __mul(&mpz,&mpt1,&mperr,p);
398 __add(&mpz,&mperr,&mpz1,p); __sub(&mpz,&mperr,&mpz2,p);
399 __mp_dbl(&mpz1,&z1,p); __mp_dbl(&mpz2,&z2,p);
400 if (z1==z2) return z1;
402 return z1; /*if unpossible to do exact computing */