glibc/nacl-glibc.git: sysdeps/ieee754/dbl-64/e_atan2.c
/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001 Free Software Foundation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/************************************************************************/
/*  MODULE_NAME: atnat2.c                                               */
/*                                                                      */
/*  FUNCTIONS: uatan2                                                   */
/*             atan2Mp                                                  */
/*             signArctan2                                              */
/*             normalized                                               */
/*                                                                      */
/*  FILES NEEDED: dla.h endian.h mpa.h mydefs.h atnat2.h                */
/*                mpatan.c mpatan2.c mpsqrt.c                           */
/*                uatan.tbl                                             */
/*                                                                      */
/*  An ultimate atan2() routine. Given two IEEE double machine numbers  */
/*  y, x it computes the correctly rounded (to nearest) value of        */
/*  atan2(y,x).                                                         */
/*                                                                      */
/*  Assumption: Machine arithmetic operations are performed in          */
/*  round to nearest mode of IEEE 754 standard.                         */
/*                                                                      */
/************************************************************************/
#include "dla.h"
#include "mpa.h"
#include "MathLib.h"
#include "uatan.tbl"
#include "atnat2.h"
#include "math_private.h"
/************************************************************************/
/* An ultimate atan2 routine. Given two IEEE double machine numbers y,x */
/* it computes the correctly rounded (to nearest) value of atan2(y,x).  */
/* Assumption: Machine arithmetic operations are performed in           */
/* round to nearest mode of IEEE 754 standard.                          */
/************************************************************************/
static double atan2Mp(double, double, const int[]);
static double signArctan2(double, double);
static double normalized(double, double, double, double);
void __mpatan2(mp_no *, mp_no *, mp_no *, int);
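
/* Overview of the computation below: __ieee754_atan2 is the kernel that
   the atan2() wrapper normally dispatches to.  After the special cases
   (NaN, +-0, +-Inf, huge exponent differences) it tries up to three
   stages of increasing cost:
     stage 1 - fast double-precision polynomial or table evaluation,
               accepted only if adding and subtracting the stage's error
               bound leaves the rounded result unchanged;
     stage 2 - the same acceptance test, but evaluated in double-double
               (MUL2/ADD2) arithmetic;
     stage 3 - atan2Mp(), a multiprecision computation over the precisions
               in pr[], which always returns a result.  */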
double __ieee754_atan2(double y,double x) {

  int i,de,ux,dx,uy,dy;
#if 0
  int p;
#endif
  static const int pr[MM]={6,8,10,20,32};
  double ax,ay,u,du,u9,ua,v,vv,dv,t1,t2,t3,t4,t5,t6,t7,t8,
         z,zz,cor,s1,ss1,s2,ss2;
#if 0
  double z1,z2;
#endif
  number num;
#if 0
  mp_no mperr,mpt1,mpx,mpy,mpz,mpz1,mpz2;
#endif

  static const int ep= 59768832,   /*  57*16**5  */
                   em=-59768832;   /* -57*16**5  */
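
  /* de below is the difference of the biased-exponent fields of y and x;
     ep/em correspond to an exponent gap of 57 (57*2^20 in the high word),
     i.e. one argument utterly dominating the other. */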
  /* x=NaN or y=NaN */
  num.d = x;  ux = num.i[HIGH_HALF];  dx = num.i[LOW_HALF];
  if ((ux&0x7ff00000)==0x7ff00000) {
    if (((ux&0x000fffff)|dx)!=0x00000000) return x+x; }
  num.d = y;  uy = num.i[HIGH_HALF];  dy = num.i[LOW_HALF];
  if ((uy&0x7ff00000)==0x7ff00000) {
    if (((uy&0x000fffff)|dy)!=0x00000000) return y+y; }
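  /* returning x+x (or y+y) propagates the NaN and, for a signaling NaN,
     raises the invalid exception. */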

  /* y=+-0 */
  if (uy==0x00000000) {
    if (dy==0x00000000) {
      if ((ux&0x80000000)==0x00000000)  return ZERO;
      else                              return opi.d; } }
  else if (uy==0x80000000) {
    if (dy==0x00000000) {
      if ((ux&0x80000000)==0x00000000)  return MZERO;
      else                              return mopi.d; } }

  /* x=+-0 */
  if (x==ZERO) {
    if ((uy&0x80000000)==0x00000000)  return hpi.d;
    else                              return mhpi.d; }

  /* x=+-INF */
  if (ux==0x7ff00000) {
    if (dx==0x00000000) {
      if      (uy==0x7ff00000) {
        if (dy==0x00000000)  return qpi.d; }
      else if (uy==0xfff00000) {
        if (dy==0x00000000)  return mqpi.d; }
      else {
        if ((uy&0x80000000)==0x00000000)  return ZERO;
        else                              return MZERO; }
    }
  }
  else if (ux==0xfff00000) {
    if (dx==0x00000000) {
      if      (uy==0x7ff00000) {
        if (dy==0x00000000)  return tqpi.d; }
      else if (uy==0xfff00000) {
        if (dy==0x00000000)  return mtqpi.d; }
      else {
        if ((uy&0x80000000)==0x00000000)  return opi.d;
        else                              return mopi.d; }
    }
  }

  /* y=+-INF */
  if      (uy==0x7ff00000) {
    if (dy==0x00000000)  return hpi.d; }
  else if (uy==0xfff00000) {
    if (dy==0x00000000)  return mhpi.d; }

  /* either x/y or y/x is very close to zero */
  ax = (x<ZERO) ? -x : x;  ay = (y<ZERO) ? -y : y;
  de = (uy & 0x7ff00000) - (ux & 0x7ff00000);
  if      (de>=ep) { return ((y>ZERO) ? hpi.d : mhpi.d); }
  else if (de<=em) {
    if (x>ZERO) {
      if ((z=ay/ax)<TWOM1022)  return normalized(ax,ay,y,z);
      else                     return signArctan2(y,z); }
    else { return ((y>ZERO) ? opi.d : mopi.d); } }

  /* if either x or y is extremely close to zero, scale abs(x), abs(y). */
  if (ax<twom500.d || ay<twom500.d) { ax*=two500.d;  ay*=two500.d; }
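  /* scaling both arguments by 2^500 leaves the ratio ay/ax unchanged while
     moving the operands well away from the subnormal range. */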

  /* x,y which are neither special nor extreme */
  if (ay<ax) {
    u=ay/ax;
    EMULV(ax,u,v,vv,t1,t2,t3,t4,t5)
    du=((ay-v)-vv)/ax; }
  else {
    u=ax/ay;
    EMULV(ay,u,v,vv,t1,t2,t3,t4,t5)
    du=((ax-v)-vv)/ay; }
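
  /* u is the ratio min(ax,ay)/max(ax,ay) in [0,1].  EMULV forms the exact
     product max*u = v+vv, so du is a correction term that makes u+du a much
     more accurate value of the quotient. */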

  if (x>ZERO) {

    /* (i) x>0, abs(y)< abs(x):  atan(ay/ax) */
    if (ay<ax) {
      if (u<inv16.d) {
        v=u*u;  zz=du+u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
        if ((z=u+(zz-u1.d*u)) == u+(zz+u1.d*u))  return signArctan2(y,z);
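
        /* Stage 2: redo the series in double-double arithmetic; each
           MUL2/ADD2 carries a (value, correction) pair such as (s1,ss1). */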
        MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
        s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
        ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
        if ((z=s1+(ss1-u5.d*s1)) == s1+(ss1+u5.d*s1))  return signArctan2(y,z);
        return atan2Mp(x,y,pr);
      }
      else {
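        /* Table-driven evaluation: (TWO52+TWO8*u)-TWO52 rounds 256*u to an
           integer via the 2^52 "shift" trick (round-to-nearest is assumed),
           and i-=16 makes index 0 correspond to u=1/16, the smallest u that
           reaches this branch. */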
        i=(TWO52+TWO8*u)-TWO52;  i-=16;
        t3=u-cij[i][0].d;
        EADD(t3,du,v,dv)
        t1=cij[i][1].d;  t2=cij[i][2].d;
        zz=v*t2+(dv*t2+v*v*(cij[i][3].d+v*(cij[i][4].d+
                            v*(cij[i][5].d+v* cij[i][6].d))));
        if (i<112) {
          if (i<48)  u9=u91.d;    /* u < 1/4        */
          else       u9=u92.d; }  /* 1/4 <= u < 1/2 */
        else {
          if (i<176) u9=u93.d;    /* 1/2 <= u < 3/4 */
          else       u9=u94.d; }  /* 3/4 <= u <= 1  */
        if ((z=t1+(zz-u9*t1)) == t1+(zz+u9*t1))  return signArctan2(y,z);

        t1=u-hij[i][0].d;
        EADD(t1,du,v,vv)
        s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
                              v*(hij[i][14].d+v* hij[i][15].d))));
        ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
        if ((z=s2+(ss2-ub.d*s2)) == s2+(ss2+ub.d*s2))  return signArctan2(y,z);
        return atan2Mp(x,y,pr);
      }
    }

    /* (ii) x>0, abs(x)<=abs(y):  pi/2-atan(ax/ay) */
    else {
      if (u<inv16.d) {
        v=u*u;
        zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
        ESUB(hpi.d,u,t2,cor)
        t3=((hpi1.d+cor)-du)-zz;
        if ((z=t2+(t3-u2.d)) == t2+(t3+u2.d))  return signArctan2(y,z);

        MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
        s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
        ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
        SUB2(hpi.d,hpi1.d,s1,ss1,s2,ss2,t1,t2)
        if ((z=s2+(ss2-u6.d)) == s2+(ss2+u6.d))  return signArctan2(y,z);
        return atan2Mp(x,y,pr);
      }
      else {
        i=(TWO52+TWO8*u)-TWO52;  i-=16;
        v=(u-cij[i][0].d)+du;
        zz=hpi1.d-v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
                     v*(cij[i][5].d+v* cij[i][6].d))));
        t1=hpi.d-cij[i][1].d;
        if (i<112)  ua=ua1.d;  /* w <  1/2 */
        else        ua=ua2.d;  /* w >= 1/2 */
        if ((z=t1+(zz-ua)) == t1+(zz+ua))  return signArctan2(y,z);

        t1=u-hij[i][0].d;
        EADD(t1,du,v,vv)
        s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
                              v*(hij[i][14].d+v* hij[i][15].d))));
        ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
        SUB2(hpi.d,hpi1.d,s2,ss2,s1,ss1,t1,t2)
        if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d))  return signArctan2(y,z);
        return atan2Mp(x,y,pr);
      }
    }
  }
  else {

    /* (iii) x<0, abs(x)< abs(y):  pi/2+atan(ax/ay) */
    if (ax<ay) {
      if (u<inv16.d) {
        v=u*u;
        zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
        EADD(hpi.d,u,t2,cor)
        t3=((hpi1.d+cor)+du)+zz;
        if ((z=t2+(t3-u3.d)) == t2+(t3+u3.d))  return signArctan2(y,z);

        MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
        s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
        ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
        ADD2(hpi.d,hpi1.d,s1,ss1,s2,ss2,t1,t2)
        if ((z=s2+(ss2-u7.d)) == s2+(ss2+u7.d))  return signArctan2(y,z);
        return atan2Mp(x,y,pr);
      }
      else {
        i=(TWO52+TWO8*u)-TWO52;  i-=16;
        v=(u-cij[i][0].d)+du;
        zz=hpi1.d+v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
                     v*(cij[i][5].d+v* cij[i][6].d))));
        t1=hpi.d+cij[i][1].d;
        if (i<112)  ua=ua1.d;  /* w <  1/2 */
        else        ua=ua2.d;  /* w >= 1/2 */
        if ((z=t1+(zz-ua)) == t1+(zz+ua))  return signArctan2(y,z);

        t1=u-hij[i][0].d;
        EADD(t1,du,v,vv)
        s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
                              v*(hij[i][14].d+v* hij[i][15].d))));
        ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
        ADD2(hpi.d,hpi1.d,s2,ss2,s1,ss1,t1,t2)
        if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d))  return signArctan2(y,z);
        return atan2Mp(x,y,pr);
      }
    }

    /* (iv) x<0, abs(y)<=abs(x):  pi-atan(ax/ay) */
    else {
      if (u<inv16.d) {
        v=u*u;
        zz=u*v*(d3.d+v*(d5.d+v*(d7.d+v*(d9.d+v*(d11.d+v*d13.d)))));
        ESUB(opi.d,u,t2,cor)
        t3=((opi1.d+cor)-du)-zz;
        if ((z=t2+(t3-u4.d)) == t2+(t3+u4.d))  return signArctan2(y,z);

        MUL2(u,du,u,du,v,vv,t1,t2,t3,t4,t5,t6,t7,t8)
        s1=v*(f11.d+v*(f13.d+v*(f15.d+v*(f17.d+v*f19.d))));
        ADD2(f9.d,ff9.d,s1,ZERO,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f7.d,ff7.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f5.d,ff5.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(f3.d,ff3.d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        MUL2(u,du,s1,ss1,s2,ss2,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(u,du,s2,ss2,s1,ss1,t1,t2)
        SUB2(opi.d,opi1.d,s1,ss1,s2,ss2,t1,t2)
        if ((z=s2+(ss2-u8.d)) == s2+(ss2+u8.d))  return signArctan2(y,z);
        return atan2Mp(x,y,pr);
      }
      else {
        i=(TWO52+TWO8*u)-TWO52;  i-=16;
        v=(u-cij[i][0].d)+du;
        zz=opi1.d-v*(cij[i][2].d+v*(cij[i][3].d+v*(cij[i][4].d+
                     v*(cij[i][5].d+v* cij[i][6].d))));
        t1=opi.d-cij[i][1].d;
        if (i<112)  ua=ua1.d;  /* w <  1/2 */
        else        ua=ua2.d;  /* w >= 1/2 */
        if ((z=t1+(zz-ua)) == t1+(zz+ua))  return signArctan2(y,z);

        t1=u-hij[i][0].d;
        EADD(t1,du,v,vv)
        s1=v*(hij[i][11].d+v*(hij[i][12].d+v*(hij[i][13].d+
                              v*(hij[i][14].d+v* hij[i][15].d))));
        ADD2(hij[i][9].d,hij[i][10].d,s1,ZERO,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][7].d,hij[i][8].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][5].d,hij[i][6].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][3].d,hij[i][4].d,s1,ss1,s2,ss2,t1,t2)
        MUL2(v,vv,s2,ss2,s1,ss1,t1,t2,t3,t4,t5,t6,t7,t8)
        ADD2(hij[i][1].d,hij[i][2].d,s1,ss1,s2,ss2,t1,t2)
        SUB2(opi.d,opi1.d,s2,ss2,s1,ss1,t1,t2)
        if ((z=s1+(ss1-uc.d)) == s1+(ss1+uc.d))  return signArctan2(y,z);
        return atan2Mp(x,y,pr);
      }
    }
  }
}

/* Treat the Denormalized case */
static double normalized(double ax,double ay,double y,double z)
{
  int p;
  mp_no mpx,mpy,mpz,mperr,mpz2,mpt1;
  p=6;
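  /* Recompute z = ay/ax in multiprecision (p = 6 limbs) and shift it down
     by mpz*ue before converting back to double; ue.d comes from atnat2.h
     and appears to serve as a relative error allowance for this
     subnormal-result path. */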
  __dbl_mp(ax,&mpx,p);  __dbl_mp(ay,&mpy,p);  __dvd(&mpy,&mpx,&mpz,p);
  __dbl_mp(ue.d,&mpt1,p);  __mul(&mpz,&mpt1,&mperr,p);
  __sub(&mpz,&mperr,&mpz2,p);  __mp_dbl(&mpz2,&z,p);
  return signArctan2(y,z);
}

/* Fix the sign and return after stage 1 or stage 2 */
static double signArctan2(double y,double z)
{
  return ((y<ZERO) ? -z : z);
}

/* Stage 3: Perform a multi-precision computation */
static double atan2Mp(double x,double y,const int pr[])
{
  double z1,z2;
  int i,p;
  mp_no mpx,mpy,mpz,mpz1,mpz2,mperr,mpt1;
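  /* Try the precisions pr[] = {6,8,10,20,32} in turn: compute z=atan2(y,x)
     at precision p, then z1 = z*(1+ud[i]) and z2 = z*(1-ud[i]), where
     ud[i].d is the error bound for that precision (from atnat2.h).  If both
     round to the same double, that double is the correctly rounded result. */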
  for (i=0; i<MM; i++) {
    p = pr[i];
    __dbl_mp(x,&mpx,p);  __dbl_mp(y,&mpy,p);
    __mpatan2(&mpy,&mpx,&mpz,p);
    __dbl_mp(ud[i].d,&mpt1,p);  __mul(&mpz,&mpt1,&mperr,p);
    __add(&mpz,&mperr,&mpz1,p);  __sub(&mpz,&mperr,&mpz2,p);
    __mp_dbl(&mpz1,&z1,p);  __mp_dbl(&mpz2,&z2,p);
    if (z1==z2)  return z1;
  }
  return z1;  /* if impossible to do exact computing */
}