/* Source: luatex.git / source / libs / gmp / gmp-src / mpf / ui_sub.c
   (extracted from web blob view, beta-0.89.2,
   blob 710502466225644151e8012cda6012b753274dda) */
/* mpf_ui_sub -- Subtract a float from an unsigned long int.

Copyright 1993-1996, 2001, 2002, 2005, 2014 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */
#include "gmp.h"
#include "gmp-impl.h"
34 void
35 mpf_ui_sub (mpf_ptr r, unsigned long int u, mpf_srcptr v)
37 #if 1
38 __mpf_struct uu;
39 mp_limb_t ul;
41 if (u == 0)
43 mpf_neg (r, v);
44 return;
47 ul = u;
48 uu._mp_size = 1;
49 uu._mp_d = &ul;
50 uu._mp_exp = 1;
51 mpf_sub (r, &uu, v);
53 #else
54 mp_srcptr up, vp;
55 mp_ptr rp, tp;
56 mp_size_t usize, vsize, rsize;
57 mp_size_t prec;
58 mp_exp_t uexp;
59 mp_size_t ediff;
60 int negate;
61 mp_limb_t ulimb;
62 TMP_DECL;
64 vsize = v->_mp_size;
66 /* Handle special cases that don't work in generic code below. */
67 if (u == 0)
69 mpf_neg (r, v);
70 return;
72 if (vsize == 0)
74 mpf_set_ui (r, u);
75 return;
78 /* If signs of U and V are different, perform addition. */
79 if (vsize < 0)
81 __mpf_struct v_negated;
82 v_negated._mp_size = -vsize;
83 v_negated._mp_exp = v->_mp_exp;
84 v_negated._mp_d = v->_mp_d;
85 mpf_add_ui (r, &v_negated, u);
86 return;
89 /* Signs are now known to be the same. */
90 ASSERT (vsize > 0);
91 ulimb = u;
92 /* Make U be the operand with the largest exponent. */
93 negate = 1 < v->_mp_exp;
94 prec = r->_mp_prec + negate;
95 rp = r->_mp_d;
96 if (negate)
98 usize = vsize;
99 vsize = 1;
100 up = v->_mp_d;
101 vp = &ulimb;
102 uexp = v->_mp_exp;
103 ediff = uexp - 1;
105 /* If U extends beyond PREC, ignore the part that does. */
106 if (usize > prec)
108 up += usize - prec;
109 usize = prec;
111 ASSERT (ediff > 0);
113 else
115 vp = v->_mp_d;
116 ediff = 1 - v->_mp_exp;
117 /* Ignore leading limbs in U and V that are equal. Doing
118 this helps increase the precision of the result. */
119 if (ediff == 0 && ulimb == vp[vsize - 1])
121 usize = 0;
122 vsize--;
123 uexp = 0;
124 /* Note that V might now have leading zero limbs.
125 In that case we have to adjust uexp. */
126 for (;;)
128 if (vsize == 0) {
129 rsize = 0;
130 uexp = 0;
131 goto done;
133 if ( vp[vsize - 1] != 0)
134 break;
135 vsize--, uexp--;
138 else
140 usize = 1;
141 uexp = 1;
142 up = &ulimb;
144 ASSERT (usize <= prec);
147 if (ediff >= prec)
149 /* V completely cancelled. */
150 if (rp != up)
151 MPN_COPY (rp, up, usize);
152 rsize = usize;
154 else
156 /* If V extends beyond PREC, ignore the part that does.
157 Note that this can make vsize neither zero nor negative. */
158 if (vsize + ediff > prec)
160 vp += vsize + ediff - prec;
161 vsize = prec - ediff;
164 /* Locate the least significant non-zero limb in (the needed
165 parts of) U and V, to simplify the code below. */
166 ASSERT (vsize > 0);
167 for (;;)
169 if (vp[0] != 0)
170 break;
171 vp++, vsize--;
172 if (vsize == 0)
174 MPN_COPY (rp, up, usize);
175 rsize = usize;
176 goto done;
179 for (;;)
181 if (usize == 0)
183 MPN_COPY (rp, vp, vsize);
184 rsize = vsize;
185 negate ^= 1;
186 goto done;
188 if (up[0] != 0)
189 break;
190 up++, usize--;
193 ASSERT (usize > 0 && vsize > 0);
194 TMP_MARK;
196 tp = TMP_ALLOC_LIMBS (prec);
198 /* uuuu | uuuu | uuuu | uuuu | uuuu */
199 /* vvvvvvv | vv | vvvvv | v | vv */
201 if (usize > ediff)
203 /* U and V partially overlaps. */
204 if (ediff == 0)
206 ASSERT (usize == 1 && vsize >= 1 && ulimb == *up); /* usize is 1>ediff, vsize >= 1 */
207 if (1 < vsize)
209 /* u */
210 /* vvvvvvv */
211 rsize = vsize;
212 vsize -= 1;
213 /* mpn_cmp (up, vp + vsize - usize, usize) > 0 */
214 if (ulimb > vp[vsize])
216 tp[vsize] = ulimb - vp[vsize] - 1;
217 ASSERT_CARRY (mpn_neg (tp, vp, vsize));
219 else
221 /* vvvvvvv */ /* Swap U and V. */
222 /* u */
223 MPN_COPY (tp, vp, vsize);
224 tp[vsize] = vp[vsize] - ulimb;
225 negate = 1;
228 else /* vsize == usize == 1 */
230 /* u */
231 /* v */
232 rsize = 1;
233 negate = ulimb < vp[0];
234 tp[0] = negate ? vp[0] - ulimb: ulimb - vp[0];
237 else
239 ASSERT (vsize + ediff <= usize);
240 ASSERT (vsize == 1 && usize >= 2 && ulimb == *vp);
242 /* uuuu */
243 /* v */
244 mp_size_t size;
245 size = usize - ediff - 1;
246 MPN_COPY (tp, up, size);
247 ASSERT_NOCARRY (mpn_sub_1 (tp + size, up + size, usize - size, ulimb));
248 rsize = usize;
250 /* Other cases are not possible */
251 /* uuuu */
252 /* vvvvv */
255 else
257 /* uuuu */
258 /* vv */
259 mp_size_t size, i;
260 ASSERT_CARRY (mpn_neg (tp, vp, vsize));
261 rsize = vsize + ediff;
262 size = rsize - usize;
263 for (i = vsize; i < size; i++)
264 tp[i] = GMP_NUMB_MAX;
265 ASSERT_NOCARRY (mpn_sub_1 (tp + size, up, usize, CNST_LIMB (1)));
268 /* Full normalize. Optimize later. */
269 while (rsize != 0 && tp[rsize - 1] == 0)
271 rsize--;
272 uexp--;
274 MPN_COPY (rp, tp, rsize);
275 TMP_FREE;
278 done:
279 r->_mp_size = negate ? -rsize : rsize;
280 r->_mp_exp = uexp;
281 #endif