1 /* mpf_ui_sub -- Subtract a float from an unsigned long int.
3 Copyright 1993-1996, 2001, 2002, 2005, 2014 Free Software Foundation, Inc.
5 This file is part of the GNU MP Library.
7 The GNU MP Library is free software; you can redistribute it and/or modify
8 it under the terms of either:
10 * the GNU Lesser General Public License as published by the Free
11 Software Foundation; either version 3 of the License, or (at your
12 option) any later version.
16 * the GNU General Public License as published by the Free Software
17 Foundation; either version 2 of the License, or (at your option) any
18 later version.
20 or both in parallel, as here.
22 The GNU MP Library is distributed in the hope that it will be useful, but
23 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
24 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25 for more details.
27 You should have received copies of the GNU General Public License and the
28 GNU Lesser General Public License along with the GNU MP Library. If not,
29 see https://www.gnu.org/licenses/. */
/* mpf_ui_sub: set R to U - V, where U is an unsigned long int and V is an
   mpf float (per the file header comment).
   NOTE(review): this excerpt is non-contiguous -- the embedded original line
   numbers (35, 56, 66, ...) jump, so declarations (prec, ediff, ulimb, up,
   vp, rp, tp, i, size, uexp, TMP_DECL), braces, and several statements are
   NOT visible here.  Comments below describe only what the visible
   fragments establish; anything else is marked as an assumption.  */
35 mpf_ui_sub (mpf_ptr r
, unsigned long int u
, mpf_srcptr v
)
/* Limb counts for the operands and the result.  */
56 mp_size_t usize
, vsize
, rsize
;
66 /* Handle special cases that don't work in generic code below. */
78 /* If signs of U and V are different, perform addition. */
/* Build a shallow, sign-flipped view of V on the stack: only _mp_size
   changes sign; the exponent and the limb array are shared with V (no
   limb data is copied).  Then delegate to mpf_add_ui, since
   u - v == u + (-v) when the signs differ.  */
81 __mpf_struct v_negated
;
82 v_negated
._mp_size
= -vsize
;
83 v_negated
._mp_exp
= v
->_mp_exp
;
84 v_negated
._mp_d
= v
->_mp_d
;
85 mpf_add_ui (r
, &v_negated
, u
);
89 /* Signs are now known to be the same. */
92 /* Make U be the operand with the largest exponent. */
/* negate is 1 exactly when v->_mp_exp > 1.  Presumably 1 is the limb
   exponent of the one-limb operand U, so this flags |V| > |U|, meaning
   the magnitudes get subtracted in swapped order and the result is
   negated at the end -- TODO confirm against the missing lines.  */
93 negate
= 1 < v
->_mp_exp
;
/* Working precision in limbs; one extra limb when the operands were
   swapped (negate == 1).  */
94 prec
= r
->_mp_prec
+ negate
;
105 /* If U extends beyond PREC, ignore the part that does. */
/* Exponent difference between U (exponent 1, see above) and V.  */
116 ediff
= 1 - v
->_mp_exp
;
117 /* Ignore leading limbs in U and V that are equal. Doing
118 this helps increase the precision of the result. */
/* Equal exponents and equal top limbs: the leading limbs cancel.
   ulimb is presumably U's single limb value -- declaration not visible.  */
119 if (ediff
== 0 && ulimb
== vp
[vsize
- 1])
124 /* Note that V might now have leading zero limbs.
125 In that case we have to adjust uexp. */
/* Skip adjustment while V's (new) most significant limb is non-zero.  */
133 if ( vp
[vsize
- 1] != 0)
144 ASSERT (usize
<= prec
);
149 /* V completely cancelled. */
/* Result is just (the remaining part of) U.  */
151 MPN_COPY (rp
, up
, usize
);
156 /* If V extends beyond PREC, ignore the part that does.
157 Note that this can make vsize neither zero nor negative. */
158 if (vsize
+ ediff
> prec
)
/* Drop V's least significant limbs that fall below the precision
   window; advance vp past them and shrink vsize accordingly.  */
160 vp
+= vsize
+ ediff
- prec
;
161 vsize
= prec
- ediff
;
164 /* Locate the least significant non-zero limb in (the needed
165 parts of) U and V, to simplify the code below. */
/* NOTE(review): the branch conditions around these two copies are not
   visible; each copies one operand verbatim into the result limbs.  */
174 MPN_COPY (rp
, up
, usize
);
183 MPN_COPY (rp
, vp
, vsize
);
193 ASSERT (usize
> 0 && vsize
> 0);
/* Scratch buffer of prec limbs for the subtraction (TMP allocator).  */
196 tp
= TMP_ALLOC_LIMBS (prec
);
198 /* uuuu | uuuu | uuuu | uuuu | uuuu */
199 /* vvvvvvv | vv | vvvvv | v | vv */
203 /* U and V partially overlaps. */
206 ASSERT (usize
== 1 && vsize
>= 1 && ulimb
== *up
); /* usize is 1>ediff, vsize >= 1 */
213 /* mpn_cmp (up, vp + vsize - usize, usize) > 0 */
/* U's limb exceeds V's overlapping top limb, so U - V is positive.
   Top limb: ulimb - vp[vsize] - 1 (the -1 pre-pays the borrow that
   mpn_neg's carry below represents); low limbs: two's-complement
   negation of V's low limbs, which must carry (ASSERT_CARRY).  */
214 if (ulimb
> vp
[vsize
])
216 tp
[vsize
] = ulimb
- vp
[vsize
] - 1;
217 ASSERT_CARRY (mpn_neg (tp
, vp
, vsize
));
221 /* vvvvvvv */ /* Swap U and V. */
/* Otherwise compute V - U instead: copy V's low limbs and subtract
   U's limb from V's top limb (no borrow possible in this branch,
   presumably because vp[vsize] >= ulimb here -- branch guard not
   visible).  */
223 MPN_COPY (tp
, vp
, vsize
);
224 tp
[vsize
] = vp
[vsize
] - ulimb
;
228 else /* vsize == usize == 1 */
/* Both operands are a single limb: subtract the smaller from the
   larger and record the sign in negate.  */
233 negate
= ulimb
< vp
[0];
234 tp
[0] = negate
? vp
[0] - ulimb
: ulimb
- vp
[0];
/* Here V lies entirely within (below the top of) U.  */
239 ASSERT (vsize
+ ediff
<= usize
);
240 ASSERT (vsize
== 1 && usize
>= 2 && ulimb
== *vp
);
/* Copy U's limbs below the subtraction point, then subtract the single
   limb (ulimb, which here aliases *vp per the ASSERT above) from U's
   upper limbs; no borrow can escape (ASSERT_NOCARRY).  */
245 size
= usize
- ediff
- 1;
246 MPN_COPY (tp
, up
, size
);
247 ASSERT_NOCARRY (mpn_sub_1 (tp
+ size
, up
+ size
, usize
- size
, ulimb
));
250 /* Other cases are not possible */
/* Disjoint case: V sits strictly below U.  Negate V's limbs into the
   low part (the carry out is the borrow into the gap), fill the gap
   between V and U with all-ones limbs (borrow propagation), then
   subtract the borrow of 1 from U's limbs in the high part.  */
260 ASSERT_CARRY (mpn_neg (tp
, vp
, vsize
));
261 rsize
= vsize
+ ediff
;
262 size
= rsize
- usize
;
263 for (i
= vsize
; i
< size
; i
++)
264 tp
[i
] = GMP_NUMB_MAX
;
265 ASSERT_NOCARRY (mpn_sub_1 (tp
+ size
, up
, usize
, CNST_LIMB (1)));
268 /* Full normalize. Optimize later. */
/* Strip most-significant zero limbs from the result.  NOTE(review):
   the loop body (rsize decrement and presumably an exponent
   adjustment) is not visible in this excerpt.  */
269 while (rsize
!= 0 && tp
[rsize
- 1] == 0)
/* Copy the normalized scratch result into R's limb array.  */
274 MPN_COPY (rp
, tp
, rsize
);
/* Store the signed limb count: negative when the subtraction was
   performed in swapped order (negate set above).  */
279 r
->_mp_size
= negate
? -rsize
: rsize
;