/* mpf_ui_sub -- Subtract a float from an unsigned long int.

Copyright 1993, 1994, 1995, 1996, 2001, 2002, 2005 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
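/* Illustrative usage (not part of the library source): mpf_ui_sub computes
   r = u - v for an unsigned long u and an mpf v, to the precision of r.

       mpf_t v, r;
       mpf_init_set_d (v, 1.5);
       mpf_init (r);
       mpf_ui_sub (r, 10, v);       -- r now holds 8.5 --
       mpf_clear (r);
       mpf_clear (v);
*/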
#include "gmp.h"
#include "gmp-impl.h"

void
mpf_ui_sub (mpf_ptr r, unsigned long int u, mpf_srcptr v)
{
  mp_size_t usize, vsize, rsize;
  /* Handle special cases that don't work in generic code below.  */

  /* If signs of U and V are different, perform addition.  */
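  /* The sign of an mpf lives in the sign of its _mp_size field, so a stack
     copy of V's header with the size negated is -V without copying any limb
     data; u - v is then evaluated as u + (-v) by mpf_add_ui.  */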
      __mpf_struct v_negated;
      v_negated._mp_size = -vsize;
      v_negated._mp_exp = v->_mp_exp;
      v_negated._mp_d = v->_mp_d;
      mpf_add_ui (r, &v_negated, u);
  /* Signs are now known to be the same.  */

  /* Make U be the operand with the largest exponent.  */
      prec = r->_mp_prec + 1;
      ediff = 1 - v->_mp_exp;
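      /* prec is the number of result limbs to compute (the destination
	 precision plus a guard limb).  ediff is the exponent difference
	 between the operands, counted in limbs, i.e. how far V's limbs sit
	 below U's once the two are aligned for the subtraction.  */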
  /* Ignore leading limbs in U and V that are equal.  Doing
     this helps increase the precision of the result.  */
      /* This loop normally exits immediately.  Optimize for that.  */
	  if (up[usize] != vp[vsize])
	    break;
      /* Note that either operand (but not both operands) might now have
	 leading zero limbs.  It matters only that U is unnormalized if
	 vsize is now zero, and vice versa.  And it is only in that case
	 that we have to adjust uexp.  */
      while (usize != 0 && up[usize - 1] == 0)
	usize--, uexp--;
      while (vsize != 0 && vp[vsize - 1] == 0)
	vsize--, uexp--;
  /* If U extends beyond PREC, ignore the part that does.  */

  /* If V extends beyond PREC, ignore the part that does.
     Note that this may make vsize negative.  */
  if (vsize + ediff > prec)
    {
      vp += vsize + ediff - prec;
      vsize = prec - ediff;
    }
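  /* Only the limbs of V that fall within the prec-limb window below U's most
     significant limb are used; lower limbs are beyond the computed precision
     and are skipped over here.  */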
  /* Allocate temp space for the result.  Allocate
     just vsize + ediff later???  */
  tp = TMP_ALLOC_LIMBS (prec);
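  /* tp is scratch space for the raw limb difference; it is normalized and
     copied into rp (the destination limbs) once its final size is known.  */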
      /* V completely cancelled.  */
      MPN_COPY (rp, up, usize);
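      /* Here every limb of V lies below the computed precision, so U's limbs
	 pass through to the result unchanged.  */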
      /* Locate the least significant non-zero limb in (the needed
	 parts of) U and V, to simplify the code below.  */
      MPN_COPY (rp, up, usize);
      MPN_COPY (rp, vp, vsize);
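      /* If either operand has no non-zero limbs left within the window, the
	 other operand is simply copied to the result and the general
	 subtraction paths below are skipped.  */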
      /* uuuu     |  uuuu     |  uuuu     |  uuuu     |  uuuu    */
      /* vvvvvvv  |  vv       |    vvvvv  |    v      |       vv */

	  /* U and V partially overlap.  */
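      /* Each column above shows one possible alignment of V's limbs (shifted
	 down by ediff) against U's, most significant limb on the left.  The
	 first four overlap U; in the last, V lies entirely below U.  The
	 branches below distinguish these cases.  */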
	      /* Have to compare the leading limbs of u and v
		 to determine whether to compute u - v or v - u.  */
		  cmp = mpn_cmp (up + usize - vsize, vp, vsize);
		      size = usize - vsize;
		      MPN_COPY (tp, up, size);
		      mpn_sub_n (tp + size, up + size, vp, vsize);
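		      /* When the comparison above says U's top limbs are not
			 smaller than V: U's low `size' limbs have no V
			 counterpart and pass through unchanged, and the
			 overlapping top limbs are subtracted limb-wise.  */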
		      /* vv       */	/* Swap U and V. */
		      size = usize - vsize;
		      tp[0] = -up[0] & GMP_NUMB_MASK;
		      for (i = 1; i < size; i++)
			tp[i] = ~up[i] & GMP_NUMB_MASK;
		      mpn_sub_n (tp + size, vp, up + size, vsize);
		      mpn_sub_1 (tp + size, tp + size, vsize, (mp_limb_t) 1);
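		      /* Swapped case (V - U): the result's low `size' limbs
			 are 0 minus U's low limbs.  Since up[0] is known to
			 be non-zero, negating the least significant limb and
			 complementing the rest forms that value exactly, and
			 the borrow it produces is propagated into the high
			 part by the final mpn_sub_1 of 1.  */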
	      else if (usize < vsize)
		  cmp = mpn_cmp (up, vp + vsize - usize, usize);
		      size = vsize - usize;
		      tp[0] = -vp[0] & GMP_NUMB_MASK;
		      for (i = 1; i < size; i++)
			tp[i] = ~vp[i] & GMP_NUMB_MASK;
		      mpn_sub_n (tp + size, up, vp + size, usize);
		      mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
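		      /* Same negate-and-borrow pattern as above, this time
			 with V supplying the low limbs that have no U
			 counterpart.  */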
		      /* vvvvvvv  */	/* Swap U and V. */
		      /* This is the only place we can get 0.0.  */
		      size = vsize - usize;
		      MPN_COPY (tp, vp, size);
		      mpn_sub_n (tp + size, vp + size, up, usize);
		  cmp = mpn_cmp (up, vp + vsize - usize, usize);
		      mpn_sub_n (tp, up, vp, usize);
		      mpn_sub_n (tp, vp, up, usize);
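		      /* Only one of these two subtractions runs: the
			 comparison above decides whether U - V or the
			 swapped V - U is computed.  */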
	      if (vsize + ediff <= usize)
		{
		  size = usize - ediff - vsize;
		  MPN_COPY (tp, up, size);
		  mpn_sub (tp + size, up + size, usize - size, vp, vsize);
		  rsize = usize;
		}
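	      /* V fits entirely within U's span here: U's lowest `size'
		 limbs pass through, and mpn_sub (which accepts operands of
		 different lengths) subtracts V from the rest of U.  */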
		  size = vsize + ediff - usize;
		  tp[0] = -vp[0] & GMP_NUMB_MASK;
		  for (i = 1; i < size; i++)
		    tp[i] = ~vp[i] & GMP_NUMB_MASK;
		  mpn_sub (tp + size, up, usize, vp + size, usize - ediff);
		  mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
		  rsize = vsize + ediff;
	  size = vsize + ediff - usize;
	  tp[0] = -vp[0] & GMP_NUMB_MASK;
	  for (i = 1; i < vsize; i++)
	    tp[i] = ~vp[i] & GMP_NUMB_MASK;
	  for (i = vsize; i < size; i++)
	    tp[i] = GMP_NUMB_MAX;
	  mpn_sub_1 (tp + size, up, usize, (mp_limb_t) 1);
	  rsize = size + usize;
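	  /* U and V do not overlap at all: the low part of the result is the
	     negation of V, written as its complement padded with all-ones
	     limbs up to U's position, and the single borrow that negation
	     produces is taken out of U by mpn_sub_1.  */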
      /* Full normalize.  Optimize later.  */
      while (rsize != 0 && tp[rsize - 1] == 0)
	{
	  rsize--;
	  uexp--;
	}
      MPN_COPY (rp, tp, rsize);
  r->_mp_exp = uexp;
  r->_mp_size = negate ? -rsize : rsize;
}