/* Origin: glibc, sysdeps/ieee754/dbl-64/e_atan2.c
   (gitweb blob c72d25a8ad843b893f6d61c7a9173ef14662d62f).  */
/*
 * IBM Accurate Mathematical Library
 * Copyright (c) International Business Machines Corp., 2001
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/************************************************************************/
/*  MODULE_NAME: atnat2.c                                               */
/*                                                                      */
/*  FUNCTIONS: uatan2                                                   */
/*             atan2Mp                                                  */
/*             signArctan2                                              */
/*             normalized                                               */
/*                                                                      */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h atnat2.h                */
/*                mpatan.c mpatan2.c mpsqrt.c                           */
/*                uatan.tbl                                             */
/*                                                                      */
/*  An ultimate atan2() routine. Given two IEEE double machine numbers  */
/*  y, x it computes the correctly rounded (to nearest) value of        */
/*  atan2(y,x).                                                         */
/*                                                                      */
/*  Assumption: Machine arithmetic operations are performed in          */
/*  round to nearest mode of IEEE 754 standard.                         */
/*                                                                      */
/************************************************************************/
39 #include "dla.h"
40 #include "mpa.h"
41 #include "MathLib.h"
42 #include "uatan.tbl"
43 #include "atnat2.h"
44 #include "math_private.h"
46 /************************************************************************/
47 /* An ultimate atan2 routine. Given two IEEE double machine numbers y,x */
48 /* it computes the correctly rounded (to nearest) value of atan2(y,x). */
49 /* Assumption: Machine arithmetic operations are performed in */
50 /* round to nearest mode of IEEE 754 standard. */
51 /************************************************************************/
52 static double atan2Mp(double ,double ,const int[]);
53 static double signArctan2(double ,double);
54 static double normalized(double ,double,double ,double);
55 void __mpatan2(mp_no *,mp_no *,mp_no *,int);
57 double __ieee754_atan2(double y,double x) {
59 int i,de,ux,dx,uy,dy;
60 #if 0
61 int p;
62 #endif
63 static const int pr[MM]={6,8,10,20,32};
64 double ax,ay,u,du,u9,ua,v,vv,dv,t1,t2,t3,t4,t5,t6,t7,t8,
65 z,zz,cor,s1,ss1,s2,ss2;
66 #if 0
67 double z1,z2;
68 #endif
69 number num;
70 #if 0
71 mp_no mperr,mpt1,mpx,mpy,mpz,mpz1,mpz2;
72 #endif
74 static const int ep= 59768832, /* 57*16**5 */
75 em=-59768832; /* -57*16**5 */
77 /* x=NaN or y=NaN */
78 num.d = x; ux = num.i[HIGH_HALF]; dx = num.i[LOW_HALF];
79 if ((ux&0x7ff00000) ==0x7ff00000) {
80 if (((ux&0x000fffff)|dx)!=0x00000000) return x+x; }
81 num.d = y; uy = num.i[HIGH_HALF]; dy = num.i[LOW_HALF];
82 if ((uy&0x7ff00000) ==0x7ff00000) {
83 if (((uy&0x000fffff)|dy)!=0x00000000) return y+y; }
85 /* y=+-0 */
86 if (uy==0x00000000) {
87 if (dy==0x00000000) {
88 if ((ux&0x80000000)==0x00000000) return ZERO;
89 else return opi.d; } }
90 else if (uy==0x80000000) {
91 if (dy==0x00000000) {
92 if ((ux&0x80000000)==0x00000000) return MZERO;
93 else return mopi.d;} }
95 /* x=+-0 */
96 if (x==ZERO) {
97 if ((uy&0x80000000)==0x00000000) return hpi.d;
98 else return mhpi.d; }
100 /* x=+-INF */
101 if (ux==0x7ff00000) {
102 if (dx==0x00000000) {
103 if (uy==0x7ff00000) {
104 if (dy==0x00000000) return qpi.d; }
105 else if (uy==0xfff00000) {
106 if (dy==0x00000000) return mqpi.d; }
107 else {
108 if ((uy&0x80000000)==0x00000000) return ZERO;
109 else return MZERO; }
112 else if (ux==0xfff00000) {
113 if (dx==0x00000000) {
114 if (uy==0x7ff00000) {
115 if (dy==0x00000000) return tqpi.d; }
116 else if (uy==0xfff00000) {
117 if (dy==0x00000000) return mtqpi.d; }
118 else {
119 if ((uy&0x80000000)==0x00000000) return opi.d;
120 else return mopi.d; }
124 /* y=+-INF */
125 if (uy==0x7ff00000) {
126 if (dy==0x00000000) return hpi.d; }
127 else if (uy==0xfff00000) {
128 if (dy==0x00000000) return mhpi.d; }
130 /* either x/y or y/x is very close to zero */
131 ax = (x<ZERO) ? -x : x; ay = (y<ZERO) ? -y : y;
132 de = (uy & 0x7ff00000) - (ux & 0x7ff00000);
133 if (de>=ep) { return ((y>ZERO) ? hpi.d : mhpi.d); }
134 else if (de<=em) {
135 if (x>ZERO) {
136 if ((z=ay/ax)<TWOM1022) return normalized(ax,ay,y,z);
137 else return signArctan2(y,z); }
138 else { return ((y>ZERO) ? opi.d : mopi.d); } }
140 /* if either x or y is extremely close to zero, scale abs(x), abs(y). */
141 if (ax<twom500.d || ay<twom500.d) { ax*=two500.d; ay*=two500.d; }
143 /* x,y which are neither special nor extreme */
144 if (ay<ax) {
145 u=ay/ax;
146 EMULV(ax,u,v,vv,t1,t2,t3,t4,t5)
147 du=((ay-v)-vv)/ax; }
148 else {
149 u=ax/ay;
150 EMULV(ay,u,v,vv,t1,t2,t3,t4,t5)
151 du=((ax-v)-vv)/ay; }
153 if (x>ZERO) {
155 /* (i) x>0, abs(y)< abs(x): atan(ay/ax) */
156 if (ay<ax) {
157 if (u<inv16.d) {
158 v=u*u; zz=du+u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
159 if ((z=u+(zz-u1.d*u)) == u+(zz+u1.d*u)) return signArctan2(y,z);
161 MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
162 s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
163 ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
164 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
165 ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
166 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
167 ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
168 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
169 ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
170 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
171 MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
172 ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
173 if ((z=s1+(ss1-u5.d*s1)) == s1+(ss1+u5.d*s1)) return signArctan2(y,z);
174 return atan2Mp(x,y,pr);
176 else {
177 i=(TWO52+TWO8*u)-TWO52; i-=16;
178 t3=u-cij[i][0].d;
179 EADD(t3,du,v,dv)
180 t1=cij[i][1].d; t2=cij[i][2].d;
181 zz=v*t2+(dv*t2+v*v*(cij[i][3].d+v*(cij[i][4].d+
182 v*(cij[i][5].d+v* cij[i][6].d))));
183 if (i<112) {
184 if (i<48) u9=u91.d; /* u < 1/4 */
185 else u9=u92.d; } /* 1/4 <= u < 1/2 */
186 else {
187 if (i<176) u9=u93.d; /* 1/2 <= u < 3/4 */
188 else u9=u94.d; } /* 3/4 <= u <= 1 */
189 if ((z=t1+(zz-u9*t1)) == t1+(zz+u9*t1)) return signArctan2(y,z);
191 t1=u-hij[i][0].d;
192 EADD(t1,du,v,vv)
193 s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
194 v*(hij[i][14].d+v* hij[i][15].d))));
195 ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
196 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
197 ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
198 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
199 ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
200 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
201 ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
202 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
203 ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
204 if ((z=s2+(ss2-ub.d*s2)) == s2+(ss2+ub.d*s2)) return signArctan2(y,z);
205 return atan2Mp(x,y,pr);
209 /* (ii) x>0, abs(x)<=abs(y): pi/2-atan(ax/ay) */
210 else {
211 if (u<inv16.d) {
212 v=u*u;
213 zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
214 ESUB(hpi.d,u,t2,cor)
215 t3=((hpi1.d+cor)-du)-zz;
216 if ((z=t2+(t3-u2.d)) == t2+(t3+u2.d)) return signArctan2(y,z);
218 MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
219 s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
220 ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
221 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
222 ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
223 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
224 ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
225 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
226 ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
227 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
228 MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
229 ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
230 SUB2(hpi.d,hpi1.d,s1,ss1,s2,ss2,t1,t2)
231 if ((z=s2+(ss2-u6.d)) == s2+(ss2+u6.d)) return signArctan2(y,z);
232 return atan2Mp(x,y,pr);
234 else {
235 i=(TWO52+TWO8*u)-TWO52; i-=16;
236 v=(u-cij[i][0].d)+du;
237 zz=hpi1.d-v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
238 v*(cij[i][5].d+v* cij[i][6].d))));
239 t1=hpi.d-cij[i][1].d;
240 if (i<112) ua=ua1.d; /* w < 1/2 */
241 else ua=ua2.d; /* w >= 1/2 */
242 if ((z=t1+(zz-ua)) == t1+(zz+ua)) return signArctan2(y,z);
244 t1=u-hij[i][0].d;
245 EADD(t1,du,v,vv)
246 s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
247 v*(hij[i][14].d+v* hij[i][15].d))));
248 ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
249 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
250 ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
251 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
252 ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
253 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
254 ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
255 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
256 ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
257 SUB2(hpi.d,hpi1.d,s2,ss2,s1,ss1,t1,t2)
258 if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d)) return signArctan2(y,z);
259 return atan2Mp(x,y,pr);
263 else {
265 /* (iii) x<0, abs(x)< abs(y): pi/2+atan(ax/ay) */
266 if (ax<ay) {
267 if (u<inv16.d) {
268 v=u*u;
269 zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
270 EADD(hpi.d,u,t2,cor)
271 t3=((hpi1.d+cor)+du)+zz;
272 if ((z=t2+(t3-u3.d)) == t2+(t3+u3.d)) return signArctan2(y,z);
274 MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
275 s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
276 ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
277 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
278 ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
279 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
280 ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
281 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
282 ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
283 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
284 MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
285 ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
286 ADD2(hpi.d,hpi1.d,s1,ss1,s2,ss2,t1,t2)
287 if ((z=s2+(ss2-u7.d)) == s2+(ss2+u7.d)) return signArctan2(y,z);
288 return atan2Mp(x,y,pr);
290 else {
291 i=(TWO52+TWO8*u)-TWO52; i-=16;
292 v=(u-cij[i][0].d)+du;
293 zz=hpi1.d+v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
294 v*(cij[i][5].d+v* cij[i][6].d))));
295 t1=hpi.d+cij[i][1].d;
296 if (i<112) ua=ua1.d; /* w < 1/2 */
297 else ua=ua2.d; /* w >= 1/2 */
298 if ((z=t1+(zz-ua)) == t1+(zz+ua)) return signArctan2(y,z);
300 t1=u-hij[i][0].d;
301 EADD(t1,du,v,vv)
302 s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
303 v*(hij[i][14].d+v* hij[i][15].d))));
304 ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
305 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
306 ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
307 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
308 ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
309 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
310 ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
311 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
312 ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
313 ADD2(hpi.d,hpi1.d,s2,ss2,s1,ss1,t1,t2)
314 if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d)) return signArctan2(y,z);
315 return atan2Mp(x,y,pr);
319 /* (iv) x<0, abs(y)<=abs(x): pi-atan(ax/ay) */
320 else {
321 if (u<inv16.d) {
322 v=u*u;
323 zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
324 ESUB(opi.d,u,t2,cor)
325 t3=((opi1.d+cor)-du)-zz;
326 if ((z=t2+(t3-u4.d)) == t2+(t3+u4.d)) return signArctan2(y,z);
328 MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
329 s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
330 ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
331 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
332 ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
333 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
334 ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
335 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
336 ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
337 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
338 MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
339 ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
340 SUB2(opi.d,opi1.d,s1,ss1,s2,ss2,t1,t2)
341 if ((z=s2+(ss2-u8.d)) == s2+(ss2+u8.d)) return signArctan2(y,z);
342 return atan2Mp(x,y,pr);
344 else {
345 i=(TWO52+TWO8*u)-TWO52; i-=16;
346 v=(u-cij[i][0].d)+du;
347 zz=opi1.d-v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
348 v*(cij[i][5].d+v* cij[i][6].d))));
349 t1=opi.d-cij[i][1].d;
350 if (i<112) ua=ua1.d; /* w < 1/2 */
351 else ua=ua2.d; /* w >= 1/2 */
352 if ((z=t1+(zz-ua)) == t1+(zz+ua)) return signArctan2(y,z);
354 t1=u-hij[i][0].d;
355 EADD(t1,du,v,vv)
356 s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
357 v*(hij[i][14].d+v* hij[i][15].d))));
358 ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
359 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
360 ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
361 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
362 ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
363 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
364 ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
365 MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
366 ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
367 SUB2(opi.d,opi1.d,s2,ss2,s1,ss1,t1,t2)
368 if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d)) return signArctan2(y,z);
369 return atan2Mp(x,y,pr);
374 /* Treat the Denormalized case */
375 static double normalized(double ax,double ay,double y, double z)
376 { int p;
377 mp_no mpx,mpy,mpz,mperr,mpz2,mpt1;
378 p=6;
379 __dbl_mp(ax,&mpx,p); __dbl_mp(ay,&mpy,p); __dvd(&mpy,&mpx,&mpz,p);
380 __dbl_mp(ue.d,&mpt1,p); __mul(&mpz,&mpt1,&mperr,p);
381 __sub(&mpz,&mperr,&mpz2,p); __mp_dbl(&mpz2,&z,p);
382 return signArctan2(y,z);
384 /* Fix the sign and return after stage 1 or stage 2 */
385 static double signArctan2(double y,double z)
387 return ((y<ZERO) ? -z : z);
389 /* Stage 3: Perform a multi-Precision computation */
390 static double atan2Mp(double x,double y,const int pr[])
392 double z1,z2;
393 int i,p;
394 mp_no mpx,mpy,mpz,mpz1,mpz2,mperr,mpt1;
395 for (i=0; i<MM; i++) {
396 p = pr[i];
397 __dbl_mp(x,&mpx,p); __dbl_mp(y,&mpy,p);
398 __mpatan2(&mpy,&mpx,&mpz,p);
399 __dbl_mp(ud[i].d,&mpt1,p); __mul(&mpz,&mpt1,&mperr,p);
400 __add(&mpz,&mperr,&mpz1,p); __sub(&mpz,&mperr,&mpz2,p);
401 __mp_dbl(&mpz1,&z1,p); __mp_dbl(&mpz2,&z2,p);
402 if (z1==z2) return z1;
404 return z1; /*if unpossible to do exact computing */