contrib/gmp/mpf/div_2exp.c
/* mpf_div_2exp -- Divide a float by 2^n.

Copyright 1993, 1994, 1996, 2000, 2001, 2002, 2004 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"

/* Multiples of GMP_NUMB_BITS in exp simply mean an amount subtracted from
   EXP(u) to set EXP(r).  The remainder exp%GMP_NUMB_BITS is then a right
   shift for the limb data.

   If exp%GMP_NUMB_BITS == 0 then there's no shifting, we effectively just
   do an mpf_set with changed EXP(r).  Like mpf_set we take prec+1 limbs in
   this case.  Although just prec would suffice, it's nice to have
   mpf_div_2exp with exp==0 come out the same as mpf_set.

   When shifting we take up to prec many limbs from the input.  Our shift is
   cy = mpn_rshift (PTR(r)+1, PTR(u)+k, ...), where k is the number of low
   limbs dropped from u, and the carry out is stored to PTR(r)[0].  We don't
   try to work extra bits from PTR(u)[k-1] (when k>=1 makes it available)
   into that low carry limb.  Just prec limbs (with the high non-zero) from
   the input is enough bits for the application requested precision, no need
   to do extra work.

   If r==u the shift will have overlapping operands.  When k>=1 (ie. when
   usize > prec), the overlap is in the style supported by rshift (ie. dst
   <= src).

   But when r==u and k==0 (ie. usize <= prec), we would have an invalid
   overlap (mpn_rshift (rp+1, rp, ...)).  In this case we must instead use
   mpn_lshift (PTR(r), PTR(u), size, NUMB-shift).  An lshift by NUMB-shift
   bits gives identical data of course, it's just its overlap restrictions
   which differ.

   In both shift cases, the resulting data is abs_usize+1 limbs.  "adj" is
   used to add +1 to that size if the high is non-zero (it may of course
   have become zero by the shifting).  EXP(u) is the exponent just above
   those abs_usize+1 limbs, so it gets -1+adj, which means -1 if the high is
   zero, or no change if the high is non-zero.

   Enhancements:

   The way mpn_lshift is used means successive mpf_div_2exp calls on the
   same operand will accumulate low zero limbs, until prec+1 limbs is
   reached.  This is wasteful for subsequent operations.  When abs_usize <=
   prec, we should test the low exp%GMP_NUMB_BITS many bits of PTR(u)[0],
   ie. those which would be shifted out by an mpn_rshift.  If they're zero
   then use that mpn_rshift.  */
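
/* A rough worked example of the split described above, assuming
   GMP_NUMB_BITS == 64 (the concrete numbers are illustrative only): with
   exp == 200, exp / GMP_NUMB_BITS == 3 whole limbs are accounted for simply
   by lowering EXP(r) by 3, and the remaining exp % GMP_NUMB_BITS == 8 bits
   are handled by the limb shift, with the bits shifted out of the low end
   landing in the extra low limb PTR(r)[0].  */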

void
mpf_div_2exp (mpf_ptr r, mpf_srcptr u, mp_bitcnt_t exp)
{
  mp_srcptr up;
  mp_ptr rp = r->_mp_d;
  mp_size_t usize;
  mp_size_t abs_usize;
  mp_size_t prec = r->_mp_prec;
  mp_exp_t uexp = u->_mp_exp;

  usize = u->_mp_size;

  if (UNLIKELY (usize == 0))
    {
      r->_mp_size = 0;
      r->_mp_exp = 0;
      return;
    }

  abs_usize = ABS (usize);
  up = u->_mp_d;

  if (exp % GMP_NUMB_BITS == 0)
    {
      prec++;                   /* retain more precision here as we don't need
                                   to account for carry-out here */
      if (abs_usize > prec)
        {
          up += abs_usize - prec;
          abs_usize = prec;
        }
      if (rp != up)
        MPN_COPY_INCR (rp, up, abs_usize);
      r->_mp_exp = uexp - exp / GMP_NUMB_BITS;
    }
  else
    {
      mp_limb_t cy_limb;
      mp_size_t adj;
      if (abs_usize > prec)
        {
          up += abs_usize - prec;
          abs_usize = prec;
          /* Use mpn_rshift since mpn_lshift operates downwards, and we
             therefore would clobber part of U before using that part, in case
             R is the same variable as U.  */
          cy_limb = mpn_rshift (rp + 1, up, abs_usize, exp % GMP_NUMB_BITS);
          rp[0] = cy_limb;
          adj = rp[abs_usize] != 0;
        }
      else
        {
          cy_limb = mpn_lshift (rp, up, abs_usize,
                                GMP_NUMB_BITS - exp % GMP_NUMB_BITS);
          rp[abs_usize] = cy_limb;
          adj = cy_limb != 0;
        }

      abs_usize += adj;
      r->_mp_exp = uexp - exp / GMP_NUMB_BITS - 1 + adj;
    }
  r->_mp_size = usize >= 0 ? abs_usize : -abs_usize;
}
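
/* Illustrative usage sketch, not part of this file: a minimal caller of
   mpf_div_2exp through the public GMP interface, built separately and
   linked with -lgmp.  The operand 3, the shift count 5, and the 128-bit
   precision are arbitrary example choices.  */

#if 0   /* example only, never compiled with the library */
#include <gmp.h>

int
main (void)
{
  mpf_t x, r;

  mpf_init2 (x, 128);           /* 128 bits of precision */
  mpf_init2 (r, 128);
  mpf_set_ui (x, 3);

  mpf_div_2exp (r, x, 5);       /* r = 3 / 2^5 = 0.09375 */

  gmp_printf ("%.5Ff\n", r);    /* prints 0.09375 */

  mpf_clear (x);
  mpf_clear (r);
  return 0;
}
#endif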