contrib/mpc/src/sqrt.c

/* mpc_sqrt -- Take the square root of a complex number.

Copyright (C) 2002, 2008, 2009, 2010, 2011, 2012 INRIA

This file is part of GNU MPC.

GNU MPC is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

GNU MPC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see http://www.gnu.org/licenses/ .
*/

#include "mpc-impl.h"

#if MPFR_VERSION_MAJOR < 3
#define mpfr_min_prec(x) \
   ( ((prec + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB) * BITS_PER_MP_LIMB \
     - mpn_scan1 (x->_mpfr_d, 0))
#endif
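
/* Note: for MPFR < 3, the replacement macro above emulates mpfr_min_prec
   (the minimal number of bits needed to store the significand of x exactly)
   and implicitly uses the local variable `prec` of mpc_sqrt, so it is only
   meaningful inside that function. */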

int
mpc_sqrt (mpc_ptr a, mpc_srcptr b, mpc_rnd_t rnd)
{
   int ok_w, ok_t = 0;
   mpfr_t w, t;
   mpfr_rnd_t rnd_w, rnd_t;
   mpfr_prec_t prec_w, prec_t;
      /* the rounding mode and the precision required for w and t, which can */
      /* be either the real or the imaginary part of a */
   mpfr_prec_t prec;
   int inex_w, inex_t = 1, inex_re, inex_im, loops = 0;
   const int re_cmp = mpfr_cmp_ui (mpc_realref (b), 0),
             im_cmp = mpfr_cmp_ui (mpc_imagref (b), 0);
      /* comparison of the real/imaginary part of b with 0 */
   int repr_w, repr_t = 0 /* to avoid gcc warning */ ;
      /* flag indicating whether the computed value is already representable
         at the target precision */
   const int im_sgn = mpfr_signbit (mpc_imagref (b)) == 0 ? 0 : -1;
      /* we need to know the sign of Im(b) when it is +/-0 */
   const mpfr_rnd_t r = im_sgn ? GMP_RNDD : GMP_RNDU;
      /* rounding mode used when computing t */
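
   /* Generic case (b = x + i*y finite, x != 0, y != 0): with n = sqrt (x^2 + y^2),
         w = sqrt ((|x| + n) / 2)   and   t = y / (2*w).
      For x > 0, sqrt (b) = w + i*t; for x < 0, |Re (sqrt (b))| = |t| and
      |Im (sqrt (b))| = w, with signs chosen so that the real part is
      non-negative and the imaginary part has the sign of y. */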

   /* special values */
   if (!mpc_fin_p (b)) {
      /* sqrt(x +i*Inf) = +Inf +I*Inf, even if x = NaN */
      /* sqrt(x -i*Inf) = +Inf -I*Inf, even if x = NaN */
      if (mpfr_inf_p (mpc_imagref (b)))
         {
            mpfr_set_inf (mpc_realref (a), +1);
            mpfr_set_inf (mpc_imagref (a), im_sgn);
            return MPC_INEX (0, 0);
         }

      if (mpfr_inf_p (mpc_realref (b)))
         {
            if (mpfr_signbit (mpc_realref (b)))
               {
                  if (mpfr_number_p (mpc_imagref (b)))
                     {
                        /* sqrt(-Inf +i*y) = +0 +i*Inf, when y positive */
                        /* sqrt(-Inf +i*y) = +0 -i*Inf, when y negative */
                        mpfr_set_ui (mpc_realref (a), 0, GMP_RNDN);
                        mpfr_set_inf (mpc_imagref (a), im_sgn);
                        return MPC_INEX (0, 0);
                     }
                  else
                     {
                        /* sqrt(-Inf +i*NaN) = NaN +/-i*Inf */
                        mpfr_set_nan (mpc_realref (a));
                        mpfr_set_inf (mpc_imagref (a), im_sgn);
                        return MPC_INEX (0, 0);
                     }
               }
            else
               {
                  if (mpfr_number_p (mpc_imagref (b)))
                     {
                        /* sqrt(+Inf +i*y) = +Inf +i*0, when y positive */
                        /* sqrt(+Inf +i*y) = +Inf -i*0, when y negative */
                        mpfr_set_inf (mpc_realref (a), +1);
                        mpfr_set_ui (mpc_imagref (a), 0, GMP_RNDN);
                        if (im_sgn)
                           mpc_conj (a, a, MPC_RNDNN);
                        return MPC_INEX (0, 0);
                     }
                  else
                     {
                        /* sqrt(+Inf -i*Inf) = +Inf -i*Inf */
                        /* sqrt(+Inf +i*Inf) = +Inf +i*Inf */
                        /* sqrt(+Inf +i*NaN) = +Inf +i*NaN */
                        return mpc_set (a, b, rnd);
                     }
               }
         }

      /* sqrt(x +i*NaN) = NaN +i*NaN, if x is not infinite */
      /* sqrt(NaN +i*y) = NaN +i*NaN, if y is not infinite */
      if (mpfr_nan_p (mpc_realref (b)) || mpfr_nan_p (mpc_imagref (b)))
         {
            mpfr_set_nan (mpc_realref (a));
            mpfr_set_nan (mpc_imagref (a));
            return MPC_INEX (0, 0);
         }
   }

   /* purely real */
   if (im_cmp == 0)
      {
         if (re_cmp == 0)
            {
               mpc_set_ui_ui (a, 0, 0, MPC_RNDNN);
               if (im_sgn)
                  mpc_conj (a, a, MPC_RNDNN);
               return MPC_INEX (0, 0);
            }
         else if (re_cmp > 0)
            {
               inex_w = mpfr_sqrt (mpc_realref (a), mpc_realref (b), MPC_RND_RE (rnd));
               mpfr_set_ui (mpc_imagref (a), 0, GMP_RNDN);
               if (im_sgn)
                  mpc_conj (a, a, MPC_RNDNN);
               return MPC_INEX (inex_w, 0);
            }
         else
            {
               mpfr_init2 (w, MPC_PREC_RE (b));
               mpfr_neg (w, mpc_realref (b), GMP_RNDN);
               if (im_sgn)
                  {
                     inex_w = -mpfr_sqrt (mpc_imagref (a), w, INV_RND (MPC_RND_IM (rnd)));
                     mpfr_neg (mpc_imagref (a), mpc_imagref (a), GMP_RNDN);
                  }
               else
                  inex_w = mpfr_sqrt (mpc_imagref (a), w, MPC_RND_IM (rnd));

               mpfr_set_ui (mpc_realref (a), 0, GMP_RNDN);
               mpfr_clear (w);
               return MPC_INEX (0, inex_w);
            }
      }
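
   /* In the purely imaginary case below, sqrt (+/-i*|y|) = sqrt (|y|/2) * (1 +/- i),
      so both parts of the result come from a single real square root. */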
   /* purely imaginary */
   if (re_cmp == 0)
      {
         mpfr_t y;

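         /* y is a shallow copy of Im(b): it shares the significand, but the
            in-place halving and negation below change only the exponent and
            sign of the copy, so Im(b) itself is not modified. */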
         y[0] = mpc_imagref (b)[0];
         /* If y/2 underflows, so does sqrt(y/2) */
         mpfr_div_2ui (y, y, 1, GMP_RNDN);
         if (im_cmp > 0)
            {
               inex_w = mpfr_sqrt (mpc_realref (a), y, MPC_RND_RE (rnd));
               inex_t = mpfr_sqrt (mpc_imagref (a), y, MPC_RND_IM (rnd));
            }
         else
            {
               mpfr_neg (y, y, GMP_RNDN);
               inex_w = mpfr_sqrt (mpc_realref (a), y, MPC_RND_RE (rnd));
               inex_t = -mpfr_sqrt (mpc_imagref (a), y, INV_RND (MPC_RND_IM (rnd)));
               mpfr_neg (mpc_imagref (a), mpc_imagref (a), GMP_RNDN);
            }
         return MPC_INEX (inex_w, inex_t);
      }

   prec = MPC_MAX_PREC (a);

   mpfr_init (w);
   mpfr_init (t);

   if (re_cmp > 0) {
      rnd_w = MPC_RND_RE (rnd);
      prec_w = MPC_PREC_RE (a);
      rnd_t = MPC_RND_IM (rnd);
      if (rnd_t == GMP_RNDZ)
         /* force GMP_RNDD or GMP_RNDU, using sign(t) = sign(y) */
         rnd_t = (im_cmp > 0 ? GMP_RNDD : GMP_RNDU);
      prec_t = MPC_PREC_IM (a);
   }
   else {
      prec_w = MPC_PREC_IM (a);
      prec_t = MPC_PREC_RE (a);
      if (im_cmp > 0) {
         rnd_w = MPC_RND_IM (rnd);
         rnd_t = MPC_RND_RE (rnd);
         if (rnd_t == GMP_RNDZ)
            rnd_t = GMP_RNDD;
      }
      else {
         rnd_w = INV_RND (MPC_RND_IM (rnd));
         rnd_t = INV_RND (MPC_RND_RE (rnd));
         if (rnd_t == GMP_RNDZ)
            rnd_t = GMP_RNDU;
      }
   }
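
   /* Main loop: recompute w and t at increasing working precision until each
      of them either is exact or can be rounded correctly, with known ternary
      value, to its target precision. */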
   do
   {
      loops ++;
      prec += (loops <= 2) ? mpc_ceil_log2 (prec) + 4 : prec / 2;
      mpfr_set_prec (w, prec);
      mpfr_set_prec (t, prec);
      /* let b = x + iy */
      /* w = sqrt ((|x| + sqrt (x^2 + y^2)) / 2), rounded down */
      /* total error bounded by 3 ulps */
      inex_w = mpc_abs (w, b, GMP_RNDD);
      if (re_cmp < 0)
         inex_w |= mpfr_sub (w, w, mpc_realref (b), GMP_RNDD);
      else
         inex_w |= mpfr_add (w, w, mpc_realref (b), GMP_RNDD);
      inex_w |= mpfr_div_2ui (w, w, 1, GMP_RNDD);
      inex_w |= mpfr_sqrt (w, w, GMP_RNDD);

      repr_w = mpfr_min_prec (w) <= prec_w;
      if (!repr_w)
         /* use the usual trick for obtaining the ternary value */
         ok_w = mpfr_can_round (w, prec - 2, GMP_RNDD, GMP_RNDU,
                                prec_w + (rnd_w == GMP_RNDN));
      else {
         /* w is representable in the target precision and thus cannot be
            rounded up */
         if (rnd_w == GMP_RNDN)
            /* If w can be rounded to nearest, then actually no rounding
               occurs, and the ternary value is known from inex_w. */
            ok_w = mpfr_can_round (w, prec - 2, GMP_RNDD, GMP_RNDN, prec_w);
         else
            /* If w can be rounded down, then any direct rounding and the
               ternary flag can be determined from inex_w. */
            ok_w = mpfr_can_round (w, prec - 2, GMP_RNDD, GMP_RNDD, prec_w);
      }

      if (!inex_w || ok_w) {
         /* t = y / 2w, rounded away */
         /* total error bounded by 7 ulps */
         inex_t = mpfr_div (t, mpc_imagref (b), w, r);
         if (!inex_t && inex_w)
            /* The division was exact, but w was not. */
            inex_t = im_sgn ? -1 : 1;
         inex_t |= mpfr_div_2ui (t, t, 1, r);
         repr_t = mpfr_min_prec (t) <= prec_t;
         if (!repr_t)
            /* As for w; since t was rounded away, we check whether rounding
               to 0 is possible. */
            ok_t = mpfr_can_round (t, prec - 3, r, GMP_RNDZ,
                                   prec_t + (rnd_t == GMP_RNDN));
         else {
            if (rnd_t == GMP_RNDN)
               ok_t = mpfr_can_round (t, prec - 3, r, GMP_RNDN, prec_t);
            else
               ok_t = mpfr_can_round (t, prec - 3, r, r, prec_t);
         }
      }
   }
   while ((inex_w && !ok_w) || (inex_t && !ok_t));
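
   /* Copy w and t into the result: for Re(b) > 0, sqrt(b) = w + i*t; for
      Re(b) < 0 the roles of w and t are exchanged, and for Im(b) < 0 both
      signs are flipped, so that the real part stays non-negative and the
      imaginary part carries the sign of Im(b). */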
   if (re_cmp > 0) {
      inex_re = mpfr_set (mpc_realref (a), w, MPC_RND_RE (rnd));
      inex_im = mpfr_set (mpc_imagref (a), t, MPC_RND_IM (rnd));
   }
   else if (im_cmp > 0) {
      inex_re = mpfr_set (mpc_realref (a), t, MPC_RND_RE (rnd));
      inex_im = mpfr_set (mpc_imagref (a), w, MPC_RND_IM (rnd));
   }
   else {
      inex_re = mpfr_neg (mpc_realref (a), t, MPC_RND_RE (rnd));
      inex_im = mpfr_neg (mpc_imagref (a), w, MPC_RND_IM (rnd));
   }

   if (repr_w && inex_w) {
      if (rnd_w == GMP_RNDN) {
         /* w has not been rounded with mpfr_set/mpfr_neg, determine ternary
            value from inex_w instead */
         if (re_cmp > 0)
            inex_re = inex_w;
         else if (im_cmp > 0)
            inex_im = inex_w;
         else
            inex_im = -inex_w;
      }
      else {
         /* determine ternary value, but also potentially add 1 ulp; can only
            be done now when we are in the target precision */
         if (re_cmp > 0) {
            if (rnd_w == GMP_RNDU) {
               MPFR_ADD_ONE_ULP (mpc_realref (a));
               inex_re = +1;
            }
            else
               inex_re = -1;
         }
         else if (im_cmp > 0) {
            if (rnd_w == GMP_RNDU) {
               MPFR_ADD_ONE_ULP (mpc_imagref (a));
               inex_im = +1;
            }
            else
               inex_im = -1;
         }
         else {
            if (rnd_w == GMP_RNDU) {
               MPFR_ADD_ONE_ULP (mpc_imagref (a));
               inex_im = -1;
            }
            else
               inex_im = +1;
         }
      }
   }
   if (repr_t && inex_t) {
      if (rnd_t == GMP_RNDN) {
         if (re_cmp > 0)
            inex_im = inex_t;
         else if (im_cmp > 0)
            inex_re = inex_t;
         else
            inex_re = -inex_t;
      }
      else {
         if (re_cmp > 0) {
            if (rnd_t == r)
               inex_im = inex_t;
            else {
               inex_im = -inex_t;
               /* im_cmp > 0 implies that Im(b) > 0, thus im_sgn = 0
                  and r = GMP_RNDU.
                  im_cmp < 0 implies that Im(b) < 0, thus im_sgn = -1
                  and r = GMP_RNDD. */
               MPFR_SUB_ONE_ULP (mpc_imagref (a));
            }
         }
         else if (im_cmp > 0) {
            if (rnd_t == r)
               inex_re = inex_t;
            else {
               inex_re = -inex_t;
               /* im_cmp > 0 implies r = GMP_RNDU (see above) */
               MPFR_SUB_ONE_ULP (mpc_realref (a));
            }
         }
         else { /* im_cmp < 0 */
            if (rnd_t == r)
               inex_re = -inex_t;
            else {
               inex_re = inex_t;
               /* im_cmp < 0 implies r = GMP_RNDD (see above) */
               MPFR_SUB_ONE_ULP (mpc_realref (a));
            }
         }
      }
   }

   mpfr_clear (w);
   mpfr_clear (t);

   return MPC_INEX (inex_re, inex_im);
}
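
/* Illustrative usage sketch (editorial addition, not part of the original
   file; compiled out with #if 0): computing sqrt(-1 + 0*i) through the
   public GNU MPC interface.  The precision and rounding modes below are
   arbitrary example choices. */
#if 0
#include <stdio.h>
#include <mpc.h>

int
main (void)
{
   mpc_t z;

   mpc_init2 (z, 53);                    /* 53 bits for each part       */
   mpc_set_si_si (z, -1, 0, MPC_RNDNN);  /* z = -1 + 0*i                */
   mpc_sqrt (z, z, MPC_RNDNN);           /* principal root: z = 0 + 1*i */
   mpc_out_str (stdout, 10, 0, z, MPC_RNDNN);
   putchar ('\n');
   mpc_clear (z);
   return 0;
}
#endif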