/* Copyright (C) 2004-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* 64-bit signed long divide.  These are not normal C functions.  Argument
   registers are t10 and t11, the result goes in t12.  Only t12 and AT may
   be clobbered.

   Theory of operation here is that we can use the FPU divider for virtually
   all operands that we see: all dividend values between -2**53 and 2**53-1
   can be computed directly.  Note that divisor values need not be checked
   against that range because the rounded fp value will be close enough such
   that the quotient is < 1, which will properly be truncated to zero when we
   convert back to integer.

   When the dividend is outside the range for which we can compute exact
   results, we use the fp quotient as an estimate from which we begin refining
   an exact integral value.  This reduces the number of iterations in the
   shift-and-subtract loop significantly; a C sketch of the approach follows
   this comment.

   The FPCR save/restore is due to the fact that the EV6 _will_ set FPCR_INE
   for cvttq/c even without /sui being set.  It will not, however, properly
   raise the exception, so we don't have to worry about FPCR_INED being clear
   and so dying by SIGFPE.  */
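/* Illustrative only, not part of the build: the fast path above expressed
   in C, under a hypothetical name.  It assumes a 64-bit long, IEEE double,
   y != 0, and a dividend already known to lie in [-2**53, 2**53-1].  In
   that range x converts to double exactly, so the truncated quotient is
   exact, and no range check on y is needed: an oversized divisor just
   produces a quotient below 1 that truncates to 0.

        long
        divq_exact_range (long x, long y)
        {
          return (long) ((double) x / (double) y);
        }

   Dividends outside that range take the estimate-and-refine path described
   below.  */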
        .type   __divq, @funcnoplt

        cfi_return_column (RA)
        cfi_def_cfa_offset (FRAME)

        /* Get the fp divide insn issued as quickly as possible.  After
           that's done, we have at least 22 cycles until its results are
           ready -- all the time in the world to figure out how we're
           going to use the results.  */
        cfi_rel_offset ($f0, 0)
        cfi_rel_offset ($f1, 8)
        cfi_rel_offset ($f3, 48)

        _ITOFT2 X, $f0, 16, Y, $f1, 24

        /* Check to see if X fit in the double as an exact value.  */
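        /* Conceptually, the test is just a range check on the dividend;
           as a C sketch (illustrative only):

                exact = x >= -(1L << 53) && x < (1L << 53);

           i.e. whether X lies in the [-2**53, 2**53-1] range described
           at the top of the file.  */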
        /* If we get here, we're expecting exact results from the division.
           Do nothing else besides convert and clean up.  */

        cfi_def_cfa_offset (0)

        /* If we get here, X is large enough that we don't expect exact
           results, and neither X nor Y got mis-translated for the fp
           division.  Our task is to take the fp result, figure out how
           far it's off from the correct result and compute a fixup.  */
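        /* In outline (using the names #defined just below): compute QY = Q*Y
           from the truncated fp estimate Q and compare it with X.  If QY
           exceeds X the estimate is too high; if X - QY is at least Y it is
           too low.  Either way the error is repaired by the scaled
           shift-and-subtract fixups further down.  */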
        cfi_rel_offset (t0, 16)
        cfi_rel_offset (t1, 24)
        cfi_rel_offset (t2, 32)
        cfi_rel_offset (t5, 40)

#define Q       RV              /* quotient */
#define R       t0              /* remainder */
#define SY      t1              /* scaled Y */
#define S       t2              /* scalar */
#define QY      t3              /* Q*Y */
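/* Throughout the fixup code S is a power of two, SY is kept equal to Y*S,
   and QY to Q*Y; R holds the current (unsigned) distance between QY and X.
   The loops below adjust Q until 0 <= X - Q*Y < Y.  */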
        /* The fixup code below can only handle unsigned values.  */

        cfi_rel_offset (t3, 0)
        cfi_rel_offset (t4, 8)

        bne     t5, $fix_sign_out

        cfi_def_cfa_offset (0)
        /* The quotient that we computed was too large.  We need to reduce
           it by S such that Y*S >= R.  Obviously the closer we get to the
           correct value the better, but overshooting high is ok, as we'll
           fix that up later.  */
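        /* A C sketch of this step (hypothetical names, unsigned magnitudes,
           and the assumption -- true here because the fp estimate is close --
           that Y*S reaches R before it can overflow):

                sy = y;  s = 1;
                do
                  {
                    sy += sy;
                    s += s;
                  }
                while (sy < r);
                q -= s;
                qy -= sy;

           Afterwards Q*Y no longer exceeds X, so any remaining error is in
           the other direction and is repaired by the "too small" fixup that
           follows.  */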
        /* The quotient that we computed was too small.  Divide the current
           remainder (R) by Y and add the result to the existing quotient
           (Q).  The expectation, of course, is that R is much smaller
           than X.  */
        /* Begin with a shift-up loop.  Compute S such that Y*S >= R.  We
           already have a copy of Y in SY and the value 1 in S.  */

        /* Shift-down and subtract loop.  Each iteration compares the scaled
           Y (SY) with the remainder (R); whenever SY <= R, another S copies
           of Y fit into the remainder, so add S to the quotient (Q) and
           subtract SY from R.  */
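        /* Both loops as a C sketch (hypothetical names, unsigned magnitudes,
           Q*Y <= X on entry, and the assumption that Y*S reaches R before it
           can overflow):

                sy = y;  s = 1;
                while (sy < r)
                  {
                    sy += sy;
                    s += s;
                  }
                while (s != 0)
                  {
                    if (sy <= r)
                      {
                        q += s;
                        r -= sy;
                      }
                    sy >>= 1;
                    s >>= 1;
                  }

           On exit R is the true remainder (0 <= R < Y) and Q the true
           quotient of the unsigned operands.  */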
        /* If we got here, then X|Y is negative.  Need to adjust everything
           such that we're doing unsigned division in the fixup loop.  */
        /* T5 records the changes we had to make:
                bit 0: set if result should be negative.
                bit 2: set if X was negated.
                bit 3: set if Y was negated.  */
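        /* As a C sketch (hypothetical variable names; the negations are the
           two's-complement negations the hardware performs):

                t5 = 0;
                if ((x ^ y) < 0) t5 |= 1;
                if (x < 0) { x = -x; t5 |= 4; }
                if (y < 0) { y = -y; t5 |= 8; }

           and after the unsigned fixup, $fix_sign_out undoes it:

                if (t5 & 8) y = -y;
                if (t5 & 4) x = -x;
                if (t5 & 1) result = -result;

           so the result carries the correct sign.  */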
        blbc    t5, $fix_sign_in_ret1

        /* Now we get to undo what we did above.  */
        /* ??? Is this really faster than just increasing the size of
           the stack frame and storing X and Y in memory?  */

        .size   __divq, .-__divq