contrib/gmp/mpf/sub.c
/* mpf_sub -- Subtract two floats.

Copyright 1993, 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005, 2011 Free
Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"

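/* Representation notes (see __mpf_struct in gmp.h): _mp_d points at the
   limb array, least significant limb first; _mp_size is the limb count,
   negative for negative values; _mp_exp is the exponent measured in limbs.
   mpf_sub reduces subtraction to a magnitude subtraction of same-signed
   operands, tracking the result sign in `negate`.  */
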
void
mpf_sub (mpf_ptr r, mpf_srcptr u, mpf_srcptr v)
{
  mp_srcptr up, vp;
  mp_ptr rp, tp;
  mp_size_t usize, vsize, rsize;
  mp_size_t prec;
  mp_exp_t exp;
  mp_size_t ediff;
  int negate;
  TMP_DECL;

  usize = u->_mp_size;
  vsize = v->_mp_size;

  /* Handle special cases that don't work in generic code below.  */
  if (usize == 0)
    {
      mpf_neg (r, v);
      return;
    }
  if (vsize == 0)
    {
      if (r != u)
        mpf_set (r, u);
      return;
    }

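  /* u - v == u + (-v): build a stack-local view of V with the sign of
     _mp_size flipped (the limb data is shared, not copied) and let
     mpf_add do the work.  */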
  /* If signs of U and V are different, perform addition.  */
  if ((usize ^ vsize) < 0)
    {
      __mpf_struct v_negated;
      v_negated._mp_size = -vsize;
      v_negated._mp_exp = v->_mp_exp;
      v_negated._mp_d = v->_mp_d;
      mpf_add (r, u, &v_negated);
      return;
    }

  TMP_MARK;

  /* Signs are now known to be the same.  */
  negate = usize < 0;

  /* Make U be the operand with the largest exponent.  */
  if (u->_mp_exp < v->_mp_exp)
    {
      mpf_srcptr t;
      t = u; u = v; v = t;
      negate ^= 1;
      usize = u->_mp_size;
      vsize = v->_mp_size;
    }

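  /* From here on U is the operand with the larger (or equal) exponent;
     `negate` records whether the computed magnitude difference must be
     sign-flipped at the end.  */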
  usize = ABS (usize);
  vsize = ABS (vsize);
  up = u->_mp_d;
  vp = v->_mp_d;
  rp = r->_mp_d;
  prec = r->_mp_prec + 1;
  exp = u->_mp_exp;
  ediff = u->_mp_exp - v->_mp_exp;

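  /* prec counts one guard limb beyond the destination's precision, and
     ediff is the exponent difference in limbs: V's limbs sit ediff limbs
     below U's.  */
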
  /* If ediff is 0 or 1, we might have a situation where the operands are
     extremely close.  We need to scan the operands from the most significant
     end and ignore the initial parts that are equal.  */
  if (ediff <= 1)
    {
      if (ediff == 0)
        {
          /* Skip leading limbs in U and V that are equal.  */
          if (up[usize - 1] == vp[vsize - 1])
            {
              /* This loop normally exits immediately.  Optimize for that.  */
              do
                {
                  usize--;
                  vsize--;
                  exp--;

                  if (usize == 0)
                    {
                      /* u cancels high limbs of v, result is rest of v */
                      negate ^= 1;
                    cancellation:
                      /* strip high zeros before truncating to prec */
                      while (vsize != 0 && vp[vsize - 1] == 0)
                        {
                          vsize--;
                          exp--;
                        }
                      if (vsize > prec)
                        {
                          vp += vsize - prec;
                          vsize = prec;
                        }
                      MPN_COPY_INCR (rp, vp, vsize);
                      rsize = vsize;
                      goto done;
                    }
                  if (vsize == 0)
                    {
                      vp = up;
                      vsize = usize;
                      goto cancellation;
                    }
                }
              while (up[usize - 1] == vp[vsize - 1]);
            }

          if (up[usize - 1] < vp[vsize - 1])
            {
              /* For simplicity, swap U and V.  Note that since the loop
                 above wouldn't have exited unless up[usize - 1] and
                 vp[vsize - 1] were non-equal, this if-statement catches all
                 cases where U is smaller than V.  */
              MPN_SRCPTR_SWAP (up,usize, vp,vsize);
              negate ^= 1;
              /* negating ediff not necessary since it is 0.  */
            }

          /* Check for
             x+1 00000000 ...
              x  ffffffff ... */
          if (up[usize - 1] != vp[vsize - 1] + 1)
            goto general_case;
          usize--;
          vsize--;
          exp--;
        }
      else /* ediff == 1 */
        {
          /* Check for
             1 00000000 ...
             0 ffffffff ... */

          if (up[usize - 1] != 1 || vp[vsize - 1] != GMP_NUMB_MAX
              || (usize >= 2 && up[usize - 2] != 0))
            goto general_case;

          usize--;
          exp--;
        }

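      /* In both branches the stripped top limbs contribute exactly one
         unit just above the remaining limbs.  A 0x00..00 limb of U over a
         0xff..ff limb of V leaves that unit unchanged one position lower,
         so such pairs can be stripped as well.  */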
      /* Skip sequences of 00000000/ffffffff */
      while (vsize != 0 && usize != 0 && up[usize - 1] == 0
             && vp[vsize - 1] == GMP_NUMB_MAX)
        {
          usize--;
          vsize--;
          exp--;
        }

      if (usize == 0)
        {
          while (vsize != 0 && vp[vsize - 1] == GMP_NUMB_MAX)
            {
              vsize--;
              exp--;
            }
        }

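      /* Clip both operands to prec-1 limbs; together with the unit limb
         restored below this keeps the result within the prec-limb
         temporary.  */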
      if (usize > prec - 1)
        {
          up += usize - (prec - 1);
          usize = prec - 1;
        }
      if (vsize > prec - 1)
        {
          vp += vsize - (prec - 1);
          vsize = prec - 1;
        }

      tp = TMP_ALLOC_LIMBS (prec);
      {
        mp_limb_t cy_limb;
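        /* What remains to compute is B^n + U' - V', where B is the limb
           base, U' and V' are the surviving low parts of the operands, and
           the B^n term (one unit just above the remaining limbs) is what
           the stripped top limbs contribute.  */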
        if (vsize == 0)
          {
            mp_size_t size, i;
            size = usize;
            for (i = 0; i < size; i++)
              tp[i] = up[i];
            tp[size] = 1;
            rsize = size + 1;
            exp++;
            goto normalize;
          }
        if (usize == 0)
          {
            mp_size_t size, i;
            size = vsize;
            for (i = 0; i < size; i++)
              tp[i] = ~vp[i] & GMP_NUMB_MASK;
            cy_limb = 1 - mpn_add_1 (tp, tp, vsize, (mp_limb_t) 1);
            rsize = vsize;
            if (cy_limb == 0)
              {
                tp[rsize] = 1;
                rsize++;
                exp++;
              }
            goto normalize;
          }
        if (usize >= vsize)
          {
            /* uuuu     */
            /* vv       */
            mp_size_t size;
            size = usize - vsize;
            MPN_COPY (tp, up, size);
            cy_limb = mpn_sub_n (tp + size, up + size, vp, vsize);
            rsize = usize;
          }
        else /* (usize < vsize) */
          {
            /* uuuu     */
            /* vvvvvvv  */
            mp_size_t size, i;
            size = vsize - usize;
            for (i = 0; i < size; i++)
              tp[i] = ~vp[i] & GMP_NUMB_MASK;
            cy_limb = mpn_sub_n (tp + size, up, vp + size, usize);
            cy_limb += mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
            cy_limb -= mpn_add_1 (tp, tp, vsize, (mp_limb_t) 1);
            rsize = vsize;
          }
        if (cy_limb == 0)
          {
            tp[rsize] = 1;
            rsize++;
            exp++;
          }
        goto normalize;
      }
    }

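  /* No drastic cancellation is possible here: compute the difference
     directly, working with at most prec limbs of each operand.  */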
 general_case:
  /* If U extends beyond PREC, ignore the part that does.  */
  if (usize > prec)
    {
      up += usize - prec;
      usize = prec;
    }

  /* If V extends beyond PREC, ignore the part that does.
     Note that this may make vsize negative.  */
  if (vsize + ediff > prec)
    {
      vp += vsize + ediff - prec;
      vsize = prec - ediff;
    }

  if (ediff >= prec)
    {
      /* V completely cancelled.  */
      if (rp != up)
        MPN_COPY (rp, up, usize);
      rsize = usize;
    }
  else
    {
      /* Allocate temp space for the result.  Allocate
         just vsize + ediff later???  */
      tp = TMP_ALLOC_LIMBS (prec);

      /* Locate the least significant non-zero limb in (the needed
         parts of) U and V, to simplify the code below.  */
      for (;;)
        {
          if (vsize == 0)
            {
              MPN_COPY (rp, up, usize);
              rsize = usize;
              goto done;
            }
          if (vp[0] != 0)
            break;
          vp++, vsize--;
        }
      for (;;)
        {
          if (usize == 0)
            {
              MPN_COPY (rp, vp, vsize);
              rsize = vsize;
              negate ^= 1;
              goto done;
            }
          if (up[0] != 0)
            break;
          up++, usize--;
        }

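      /* Only the relative alignment of the remaining limbs matters now.
         The diagrams show U's limbs above V's, V shifted down by ediff:  */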
      /* uuuu     |  uuuu     |  uuuu     |  uuuu     |  uuuu    */
      /* vvvvvvv  |  vv       |    vvvvv  |    v      |       vv */

      if (usize > ediff)
        {
          /* U and V partially overlap.  */
          if (ediff == 0)
            {
              /* Have to compare the leading limbs of u and v
                 to determine whether to compute u - v or v - u.  */
              if (usize >= vsize)
                {
                  /* uuuu     */
                  /* vv       */
                  mp_size_t size;
                  size = usize - vsize;
                  MPN_COPY (tp, up, size);
                  mpn_sub_n (tp + size, up + size, vp, vsize);
                  rsize = usize;
                }
              else /* (usize < vsize) */
                {
                  /* uuuu     */
                  /* vvvvvvv  */
                  mp_size_t size, i;
                  size = vsize - usize;
                  tp[0] = -vp[0] & GMP_NUMB_MASK;
                  for (i = 1; i < size; i++)
                    tp[i] = ~vp[i] & GMP_NUMB_MASK;
                  mpn_sub_n (tp + size, up, vp + size, usize);
                  mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
                  rsize = vsize;
                }
            }
          else
            {
              if (vsize + ediff <= usize)
                {
                  /* uuuu     */
                  /*   v      */
                  mp_size_t size;
                  size = usize - ediff - vsize;
                  MPN_COPY (tp, up, size);
                  mpn_sub (tp + size, up + size, usize - size, vp, vsize);
                  rsize = usize;
                }
              else
                {
                  /* uuuu     */
                  /*   vvvvv  */
                  mp_size_t size, i;
                  size = vsize + ediff - usize;
                  tp[0] = -vp[0] & GMP_NUMB_MASK;
                  for (i = 1; i < size; i++)
                    tp[i] = ~vp[i] & GMP_NUMB_MASK;
                  mpn_sub (tp + size, up, usize, vp + size, usize - ediff);
                  mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
                  rsize = vsize + ediff;
                }
            }
        }
      else
        {
          /* uuuu     */
          /*      vv  */
          mp_size_t size, i;
          size = vsize + ediff - usize;
          tp[0] = -vp[0] & GMP_NUMB_MASK;
          for (i = 1; i < vsize; i++)
            tp[i] = ~vp[i] & GMP_NUMB_MASK;
          for (i = vsize; i < size; i++)
            tp[i] = GMP_NUMB_MAX;
          mpn_sub_1 (tp + size, up, usize, (mp_limb_t) 1);
          rsize = size + usize;
        }

    normalize:
      /* Full normalize.  Optimize later.  */
      while (rsize != 0 && tp[rsize - 1] == 0)
        {
          rsize--;
          exp--;
        }
      MPN_COPY (rp, tp, rsize);
    }

 done:
  r->_mp_size = negate ? -rsize : rsize;
  if (rsize == 0)
    exp = 0;
  r->_mp_exp = exp;
  TMP_FREE;
}
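
/* Caller-side usage sketch (not part of the library source); the values
   are chosen so that u and v are nearly equal, exercising the ediff <= 1
   cancellation path above:

     #include <stdio.h>
     #include <gmp.h>

     int main (void)
     {
       mpf_t u, v, d;
       mpf_init2 (u, 128);
       mpf_init2 (v, 128);
       mpf_init2 (d, 128);
       mpf_set_str (u, "1.00000000000000000000000001", 10);
       mpf_set_str (v, "1.0", 10);
       mpf_sub (d, u, v);
       gmp_printf ("%.10Fe\n", d);
       mpf_clears (u, v, d, NULL);
       return 0;
     }
*/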