1 /* mpf_sub -- Subtract two floats.
3 Copyright 1993-1996, 1999-2002, 2004, 2005, 2011, 2014 Free Software Foundation, Inc.
5 This file is part of the GNU MP Library.
7 The GNU MP Library is free software; you can redistribute it and/or modify
8 it under the terms of either:
10 * the GNU Lesser General Public License as published by the Free
11 Software Foundation; either version 3 of the License, or (at your
12 option) any later version.
13 
14 or
15 
16 * the GNU General Public License as published by the Free Software
17 Foundation; either version 2 of the License, or (at your option) any
18 later version.
19 
20 or both in parallel, as here.
22 The GNU MP Library is distributed in the hope that it will be useful, but
23 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
24 or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
25 for more details.
26 
27 You should have received copies of the GNU General Public License and the
28 GNU Lesser General Public License along with the GNU MP Library. If not,
29 see https://www.gnu.org/licenses/. */
35 mpf_sub (mpf_ptr r
, mpf_srcptr u
, mpf_srcptr v
)
39 mp_size_t usize
, vsize
, rsize
;
49 /* Handle special cases that don't work in generic code below. */
62 /* If signs of U and V are different, perform addition. */
63 if ((usize
^ vsize
) < 0)
65 __mpf_struct v_negated
;
66 v_negated
._mp_size
= -vsize
;
67 v_negated
._mp_exp
= EXP (v
);
68 v_negated
._mp_d
= PTR (v
);
69 mpf_add (r
, u
, &v_negated
);
75 /* Signs are now known to be the same. */
78 /* Make U be the operand with the largest exponent. */
79 if (EXP (u
) < EXP (v
))
95 ediff
= exp
- EXP (v
);
97 /* If ediff is 0 or 1, we might have a situation where the operands are
98 extremely close. We need to scan the operands from the most significant
99 end ignore the initial parts that are equal. */
104 /* Skip leading limbs in U and V that are equal. */
105 /* This loop normally exits immediately. Optimize for that. */
106 while (up
[usize
- 1] == vp
[vsize
- 1])
114 /* u cancels high limbs of v, result is rest of v */
117 /* strip high zeros before truncating to prec */
118 while (vsize
!= 0 && vp
[vsize
- 1] == 0)
128 MPN_COPY_INCR (rp
, vp
, vsize
);
140 if (up
[usize
- 1] < vp
[vsize
- 1])
142 /* For simplicity, swap U and V. Note that since the loop above
143 wouldn't have exited unless up[usize - 1] and vp[vsize - 1]
144 were non-equal, this if-statement catches all cases where U
145 is smaller than V. */
146 MPN_SRCPTR_SWAP (up
,usize
, vp
,vsize
);
148 /* negating ediff not necessary since it is 0. */
154 if (up
[usize
- 1] != vp
[vsize
- 1] + 1)
160 else /* ediff == 1 */
166 if (up
[usize
- 1] != 1 || vp
[vsize
- 1] != GMP_NUMB_MAX
167 || (usize
>= 2 && up
[usize
- 2] != 0))
174 /* Skip sequences of 00000000/ffffffff */
175 while (vsize
!= 0 && usize
!= 0 && up
[usize
- 1] == 0
176 && vp
[vsize
- 1] == GMP_NUMB_MAX
)
185 while (vsize
!= 0 && vp
[vsize
- 1] == GMP_NUMB_MAX
)
191 else if (usize
> prec
- 1)
193 up
+= usize
- (prec
- 1);
196 if (vsize
> prec
- 1)
198 vp
+= vsize
- (prec
- 1);
202 tp
= TMP_ALLOC_LIMBS (prec
);
207 MPN_COPY (tp
, up
, usize
);
215 cy_limb
= mpn_neg (tp
, vp
, vsize
);
218 else if (usize
>= vsize
)
223 size
= usize
- vsize
;
224 MPN_COPY (tp
, up
, size
);
225 cy_limb
= mpn_sub_n (tp
+ size
, up
+ size
, vp
, vsize
);
228 else /* (usize < vsize) */
233 size
= vsize
- usize
;
234 cy_limb
= mpn_neg (tp
, vp
, size
);
235 cy_limb
= mpn_sub_nc (tp
+ size
, up
, vp
+ size
, usize
, cy_limb
);
250 /* If U extends beyond PREC, ignore the part that does. */
257 /* If V extends beyond PREC, ignore the part that does.
258 Note that this may make vsize negative. */
259 if (vsize
+ ediff
> prec
)
261 vp
+= vsize
+ ediff
- prec
;
262 vsize
= prec
- ediff
;
267 /* V completely cancelled. */
269 MPN_COPY (rp
, up
, usize
);
274 /* Allocate temp space for the result. Allocate
275 just vsize + ediff later??? */
276 tp
= TMP_ALLOC_LIMBS (prec
);
278 /* Locate the least significant non-zero limb in (the needed
279 parts of) U and V, to simplify the code below. */
284 MPN_COPY (rp
, up
, usize
);
296 MPN_COPY (rp
, vp
, vsize
);
306 /* uuuu | uuuu | uuuu | uuuu | uuuu */
307 /* vvvvvvv | vv | vvvvv | v | vv */
311 /* U and V partially overlaps. */
314 /* Have to compare the leading limbs of u and v
315 to determine whether to compute u - v or v - u. */
321 size
= usize
- vsize
;
322 MPN_COPY (tp
, up
, size
);
323 mpn_sub_n (tp
+ size
, up
+ size
, vp
, vsize
);
326 else /* (usize < vsize) */
331 size
= vsize
- usize
;
332 ASSERT_CARRY (mpn_neg (tp
, vp
, size
));
333 mpn_sub_nc (tp
+ size
, up
, vp
+ size
, usize
, CNST_LIMB (1));
339 if (vsize
+ ediff
<= usize
)
344 size
= usize
- ediff
- vsize
;
345 MPN_COPY (tp
, up
, size
);
346 mpn_sub (tp
+ size
, up
+ size
, usize
- size
, vp
, vsize
);
354 rsize
= vsize
+ ediff
;
355 size
= rsize
- usize
;
356 ASSERT_CARRY (mpn_neg (tp
, vp
, size
));
357 mpn_sub (tp
+ size
, up
, usize
, vp
+ size
, usize
- ediff
);
358 /* Should we use sub_nc then sub_1? */
359 MPN_DECR_U (tp
+ size
, usize
, CNST_LIMB (1));
368 size
= vsize
+ ediff
- usize
;
369 ASSERT_CARRY (mpn_neg (tp
, vp
, vsize
));
370 for (i
= vsize
; i
< size
; i
++)
371 tp
[i
] = GMP_NUMB_MAX
;
372 mpn_sub_1 (tp
+ size
, up
, usize
, (mp_limb_t
) 1);
373 rsize
= size
+ usize
;
377 /* Full normalize. Optimize later. */
378 while (rsize
!= 0 && tp
[rsize
- 1] == 0)
384 MPN_COPY (rp
, tp
, rsize
);
393 SIZ (r
) = negate
? -rsize
: rsize
;