/* UltraSPARC 64 mpn_modexact_1c_odd -- mpn by limb exact style remainder.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000-2003 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */
#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

#include "mpn/sparc64/sparc64.h"
/*                 64-bit divisor   32-bit divisor
                    cycles/limb      cycles/limb
*/
/* This implementation reduces the number of multiplies done, knowing that
   on ultrasparc 1 and 2 the mulx instruction stalls the whole chip.

   The key idea is to use the fact that the low limb of q*d equals l, this
   being the whole purpose of the q calculated.  It means there's no need to
   calculate the lowest 32x32->64 part of q*d, instead it can be inferred
   from l and the other three 32x32->64 parts.  See sparc64.h for details,
   and the sketch following this comment.

   When d is 32 bits, the same applies, but in this case there's only one
   other 32x32->64 part (ie. HIGH(q)*d).

   The net effect is that for a 64-bit divisor each limb is 4 mulx, or for
   a 32-bit divisor each is 2 mulx.

   Enhancements:

   No doubt this could be done in assembler, if that helped the scheduling,
   or perhaps guaranteed good code irrespective of the compiler.

   Alternatives:

   It might be possible to use floating point.  The loop is dominated by
   multiply latency, so it's not clear floats would improve that.  One
   possibility would be to take two limbs at a time, with a 128 bit inverse,
   if there are enough registers, which could effectively use float
   throughput to reduce total latency across two limbs.  */
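/* Sketch of the "lowequal" idea: splitting q and d into 32-bit halves,
   q = 2^32*qh + ql and d = 2^32*dh + dl,

       q*d = 2^64*(qh*dh) + 2^32*(qh*dl + ql*dh) + ql*dl

   and since q = l * inverse with inverse*d == 1 mod 2^64, the low 64 bits
   of q*d are exactly l.  So ql*dl never has to be formed with mulx: its
   contribution to the high limb follows from l and the other three
   products, which is what umul_ppmm_lowequal and umul_ppmm_half_lowequal
   in sparc64.h exploit.  */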
#define ASSERT_RETVAL(r)                \
  ASSERT (orig_c < d ? r < d : r <= d)
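/* The return value is normally below d, but an initial carry orig_c >= d
   can leave a result equal to d (see the s==0 remark at the final steps
   below), hence the two ranges in ASSERT_RETVAL.  */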
mp_limb_t
mpn_modexact_1c_odd (mp_srcptr src, mp_size_t size, mp_limb_t d, mp_limb_t orig_c)
{
  mp_limb_t  c = orig_c;
  mp_limb_t  s, l, q, h, inverse;

  ASSERT (size >= 1);
  ASSERT (d & 1);
  ASSERT_MPN (src, size);
  ASSERT_LIMB (d);
  ASSERT_LIMB (c);
  /* udivx is faster than 10 or 12 mulx's for one limb via an inverse */
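  /* With only one limb there's no later limb for a borrow or carry to
     propagate into, so the difference of s and c can be reduced mod d
     directly; when s > c the udivx remainder is folded to d - (s-c) % d so
     the result still represents c - s mod d.  */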
  if (size == 1)
    {
      s = src[0];
      if (s > c)
        {
          l = s - c;
          h = l % d;
          if (h != 0)
            h = d - h;
        }
      else
        {
          l = c - s;
          h = l % d;
        }
      return h;
    }

  binvert_limb (inverse, d);
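  /* inverse is the multiplicative inverse of d mod 2^64 (d is odd), so
     q = l * inverse in the loops below is the unique q whose product q*d
     has low limb equal to l.  */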

  if (d <= 0xFFFFFFFF)
    {
      s = *src++;
      size--;
      do
        {
          SUBC_LIMB (c, l, s, c);
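          /* l is s minus the running carry, with the borrow out of that
             subtraction becoming the new c; the high limb h of q*d below
             is then folded into c for the next limb.  */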
          s = *src++;
          q = l * inverse;
          umul_ppmm_half_lowequal (h, q, d, l);
          c += h;
          size--;
        }
      while (size != 0);

      if (s <= d)
        {
          /* With high s <= d the final step can be a subtract and addback.
             If c==0 then the addback will restore to l>=0.  If c==d then
             will get l==d if s==0, but that's ok per the function
             definition.  */
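          /* l > c after the subtraction detects that c - s wrapped below
             zero, in which case d is added back.  */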
          l = c - s;
          l += (l > c ? d : 0);

          ASSERT_RETVAL (l);
          return l;
        }
      else
        {
          /* Can't skip a divide, just do the loop code once more. */
          SUBC_LIMB (c, l, s, c);
          q = l * inverse;
          umul_ppmm_half_lowequal (h, q, d, l);
          c += h;

          ASSERT_RETVAL (c);
          return c;
        }
    }
  else
    {
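      /* d is wider than 32 bits: split it into halves for the
         three-product umul_ppmm_lowequal form.  */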
      mp_limb_t  dl = LOW32 (d);
      mp_limb_t  dh = HIGH32 (d);

      s = *src++;
      size--;
      do
        {
          SUBC_LIMB (c, l, s, c);
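          /* umul_ppmm_lowequal below forms only HIGH(q)*dh, HIGH(q)*dl and
             LOW(q)*dh with mulx; the LOW(q)*dl part is implied by the low
             limb of q*d being l.  */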
          s = *src++;
          q = l * inverse;
          umul_ppmm_lowequal (h, q, d, dh, dl, l);
          c += h;
          size--;
        }
      while (size != 0);

      if (s <= d)
        {
          /* With high s <= d the final step can be a subtract and addback.
             If c==0 then the addback will restore to l>=0.  If c==d then
             will get l==d if s==0, but that's ok per the function
             definition.  */
          l = c - s;
          l += (l > c ? d : 0);

          ASSERT_RETVAL (l);
          return l;
        }
      else
        {
          /* Can't skip a divide, just do the loop code once more. */
          SUBC_LIMB (c, l, s, c);
          q = l * inverse;
          umul_ppmm_lowequal (h, q, d, dh, dl, l);
          c += h;

          ASSERT_RETVAL (c);
          return c;
        }
    }
}