/* mpf_sub -- Subtract two floats.

Copyright 1993, 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005 Free
Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
25 mpf_sub (mpf_ptr r
, mpf_srcptr u
, mpf_srcptr v
)
29 mp_size_t usize
, vsize
, rsize
;
39 /* Handle special cases that don't work in generic code below. */
52 /* If signs of U and V are different, perform addition. */
53 if ((usize
^ vsize
) < 0)
55 __mpf_struct v_negated
;
56 v_negated
._mp_size
= -vsize
;
57 v_negated
._mp_exp
= v
->_mp_exp
;
58 v_negated
._mp_d
= v
->_mp_d
;
59 mpf_add (r
, u
, &v_negated
);
65 /* Signs are now known to be the same. */
68 /* Make U be the operand with the largest exponent. */
69 if (u
->_mp_exp
< v
->_mp_exp
)
83 prec
= r
->_mp_prec
+ 1;
85 ediff
= u
->_mp_exp
- v
->_mp_exp
;
87 /* If ediff is 0 or 1, we might have a situation where the operands are
88 extremely close. We need to scan the operands from the most significant
89 end ignore the initial parts that are equal. */
94 /* Skip leading limbs in U and V that are equal. */
95 if (up
[usize
- 1] == vp
[vsize
- 1])
97 /* This loop normally exits immediately. Optimize for that. */
106 /* u cancels high limbs of v, result is rest of v */
109 /* strip high zeros before truncating to prec */
110 while (vsize
!= 0 && vp
[vsize
- 1] == 0)
120 MPN_COPY_INCR (rp
, vp
, vsize
);
131 while (up
[usize
- 1] == vp
[vsize
- 1]);
134 if (up
[usize
- 1] < vp
[vsize
- 1])
136 /* For simplicity, swap U and V. Note that since the loop above
137 wouldn't have exited unless up[usize - 1] and vp[vsize - 1]
138 were non-equal, this if-statement catches all cases where U
139 is smaller than V. */
140 MPN_SRCPTR_SWAP (up
,usize
, vp
,vsize
);
142 /* negating ediff not necessary since it is 0. */
148 if (up
[usize
- 1] != vp
[vsize
- 1] + 1)
154 else /* ediff == 1 */
160 if (up
[usize
- 1] != 1 || vp
[vsize
- 1] != GMP_NUMB_MAX
161 || (usize
>= 2 && up
[usize
- 2] != 0))
168 /* Skip sequences of 00000000/ffffffff */
169 while (vsize
!= 0 && usize
!= 0 && up
[usize
- 1] == 0
170 && vp
[vsize
- 1] == GMP_NUMB_MAX
)
179 while (vsize
!= 0 && vp
[vsize
- 1] == GMP_NUMB_MAX
)
186 if (usize
> prec
- 1)
188 up
+= usize
- (prec
- 1);
191 if (vsize
> prec
- 1)
193 vp
+= vsize
- (prec
- 1);
197 tp
= (mp_ptr
) TMP_ALLOC (prec
* BYTES_PER_MP_LIMB
);
204 for (i
= 0; i
< size
; i
++)
215 for (i
= 0; i
< size
; i
++)
216 tp
[i
] = ~vp
[i
] & GMP_NUMB_MASK
;
217 cy_limb
= 1 - mpn_add_1 (tp
, tp
, vsize
, (mp_limb_t
) 1);
232 size
= usize
- vsize
;
233 MPN_COPY (tp
, up
, size
);
234 cy_limb
= mpn_sub_n (tp
+ size
, up
+ size
, vp
, vsize
);
237 else /* (usize < vsize) */
242 size
= vsize
- usize
;
243 for (i
= 0; i
< size
; i
++)
244 tp
[i
] = ~vp
[i
] & GMP_NUMB_MASK
;
245 cy_limb
= mpn_sub_n (tp
+ size
, up
, vp
+ size
, usize
);
246 cy_limb
+= mpn_sub_1 (tp
+ size
, tp
+ size
, usize
, (mp_limb_t
) 1);
247 cy_limb
-= mpn_add_1 (tp
, tp
, vsize
, (mp_limb_t
) 1);
261 /* If U extends beyond PREC, ignore the part that does. */
268 /* If V extends beyond PREC, ignore the part that does.
269 Note that this may make vsize negative. */
270 if (vsize
+ ediff
> prec
)
272 vp
+= vsize
+ ediff
- prec
;
273 vsize
= prec
- ediff
;
276 /* Allocate temp space for the result. Allocate
277 just vsize + ediff later??? */
278 tp
= (mp_ptr
) TMP_ALLOC (prec
* BYTES_PER_MP_LIMB
);
282 /* V completely cancelled. */
284 MPN_COPY (rp
, up
, usize
);
289 /* Locate the least significant non-zero limb in (the needed
290 parts of) U and V, to simplify the code below. */
295 MPN_COPY (rp
, up
, usize
);
307 MPN_COPY (rp
, vp
, vsize
);
317 /* uuuu | uuuu | uuuu | uuuu | uuuu */
318 /* vvvvvvv | vv | vvvvv | v | vv */
322 /* U and V partially overlaps. */
325 /* Have to compare the leading limbs of u and v
326 to determine whether to compute u - v or v - u. */
332 size
= usize
- vsize
;
333 MPN_COPY (tp
, up
, size
);
334 mpn_sub_n (tp
+ size
, up
+ size
, vp
, vsize
);
337 else /* (usize < vsize) */
342 size
= vsize
- usize
;
343 tp
[0] = -vp
[0] & GMP_NUMB_MASK
;
344 for (i
= 1; i
< size
; i
++)
345 tp
[i
] = ~vp
[i
] & GMP_NUMB_MASK
;
346 mpn_sub_n (tp
+ size
, up
, vp
+ size
, usize
);
347 mpn_sub_1 (tp
+ size
, tp
+ size
, usize
, (mp_limb_t
) 1);
353 if (vsize
+ ediff
<= usize
)
358 size
= usize
- ediff
- vsize
;
359 MPN_COPY (tp
, up
, size
);
360 mpn_sub (tp
+ size
, up
+ size
, usize
- size
, vp
, vsize
);
368 size
= vsize
+ ediff
- usize
;
369 tp
[0] = -vp
[0] & GMP_NUMB_MASK
;
370 for (i
= 1; i
< size
; i
++)
371 tp
[i
] = ~vp
[i
] & GMP_NUMB_MASK
;
372 mpn_sub (tp
+ size
, up
, usize
, vp
+ size
, usize
- ediff
);
373 mpn_sub_1 (tp
+ size
, tp
+ size
, usize
, (mp_limb_t
) 1);
374 rsize
= vsize
+ ediff
;
383 size
= vsize
+ ediff
- usize
;
384 tp
[0] = -vp
[0] & GMP_NUMB_MASK
;
385 for (i
= 1; i
< vsize
; i
++)
386 tp
[i
] = ~vp
[i
] & GMP_NUMB_MASK
;
387 for (i
= vsize
; i
< size
; i
++)
388 tp
[i
] = GMP_NUMB_MAX
;
389 mpn_sub_1 (tp
+ size
, up
, usize
, (mp_limb_t
) 1);
390 rsize
= size
+ usize
;
394 /* Full normalize. Optimize later. */
395 while (rsize
!= 0 && tp
[rsize
- 1] == 0)
400 MPN_COPY (rp
, tp
, rsize
);
404 r
->_mp_size
= negate
? -rsize
: rsize
;