1 /* IEEE-754 double-precision functions for Xtensa
2 Copyright (C) 2006-2018 Free Software Foundation, Inc.
3 Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
38 /* Warning! The branch displacements for some Xtensa branch instructions
39 are quite small, and this code has been carefully laid out to keep
40 branch targets in range. If you change anything, be sure to check that
41 the assembler is not relaxing anything to branch over a jump. */
47 .type __negdf2, @function
61 /* Handle NaNs and Infinities. (This code is placed before the
62 start of the function just to keep it in range of the limited
63 branch displacements.) */
66 /* If y is neither Infinity nor NaN, return x. */
68 /* If x is a NaN, return it. Otherwise, return y. */
71 beqz a7, .Ladd_ynan_or_inf
81 /* Operand signs differ. Do a subtraction. */
88 .type __adddf3, @function
93 /* Check if the two operands have the same sign. */
95 bltz a7, .Ladd_opposite_signs
98 /* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */
99 ball xh, a6, .Ladd_xnan_or_inf
100 ball yh, a6, .Ladd_ynan_or_inf
102 /* Compare the exponents. The smaller operand will be shifted
103 right by the exponent difference and added to the larger
107 bltu a7, a8, .Ladd_shiftx
110 /* Check if the smaller (or equal) exponent is zero. */
111 bnone yh, a6, .Ladd_yexpzero
113 /* Replace yh sign/exponent with 0x001. */
119 /* Compute the exponent difference. Optimize for difference < 32. */
121 bgeui a10, 32, .Ladd_bigshifty
123 /* Shift yh/yl right by the exponent difference. Any bits that are
124 shifted out of yl are saved in a9 for rounding the result. */
132 /* Do the 64-bit addition. */
138 /* Check if the add overflowed into the exponent. */
139 extui a10, xh, 20, 12
140 beq a10, a7, .Ladd_round
145 /* y is a subnormal value. Replace its sign/exponent with zero,
146 i.e., no implicit "1.0", and increment the apparent exponent
147 because subnormals behave as if they had the minimum (nonzero)
148 exponent. Test for the case when both exponents are zero. */
151 bnone xh, a6, .Ladd_bothexpzero
156 /* Both exponents are zero. Handle this as a special case. There
157 is no need to shift or round, and the normal code for handling
158 a carry into the exponent field will not work because it
159 assumes there is an implicit "1.0" that needs to be added. */
167 /* Exponent difference > 64 -- just return the bigger value. */
170 /* Shift yh/yl right by the exponent difference. Any bits that are
171 shifted out are saved in a9 for rounding the result. */
173 sll a11, yl /* lost bits shifted out of yl */
178 or a9, a9, a10 /* any positive, nonzero value will work */
182 /* Same as "yexpzero" except skip handling the case when both
183 exponents are zero. */
190 /* Same thing as the "shifty" code, but with x and y swapped. Also,
191 because the exponent difference is always nonzero in this version,
192 the shift sequence can use SLL and skip loading a constant zero. */
193 bnone xh, a6, .Ladd_xexpzero
201 bgeui a10, 32, .Ladd_bigshiftx
214 /* Check if the add overflowed into the exponent. */
215 extui a10, xh, 20, 12
216 bne a10, a8, .Ladd_carry
219 /* Round up if the leftover fraction is >= 1/2. */
222 beqz xl, .Ladd_roundcarry
224 /* Check if the leftover fraction is exactly 1/2. */
226 beqz a9, .Ladd_exactlyhalf
230 /* Mostly the same thing as "bigshifty".... */
231 bgeui a10, 64, .Ladd_returny
248 /* The addition has overflowed into the exponent field, so the
249 value needs to be renormalized. The mantissa of the result
250 can be recovered by subtracting the original exponent and
251 adding 0x100000 (which is the explicit "1.0" for the
252 mantissa of the non-shifted operand -- the "1.0" for the
253 shifted operand was already added). The mantissa can then
254 be shifted right by one bit. The explicit "1.0" of the
255 shifted mantissa then needs to be replaced by the exponent,
256 incremented by one to account for the normalizing shift.
257 It is faster to combine these operations: do the shift first
258 and combine the additions and subtractions. If x is the
259 original exponent, the result is:
260 shifted mantissa - (x << 19) + (1 << 19) + (x << 20)
262 which is equivalent to: shifted mantissa + ((x + 1) << 19)
263 Note that the exponent is incremented here by leaving the
264 explicit "1.0" of the mantissa in the exponent field. */
266 /* Shift xh/xl right by one bit. Save the lsb of xl. */
272 /* See explanation above. The original exponent is in a8. */
277 /* Return an Infinity if the exponent overflowed. */
278 ball xh, a6, .Ladd_infinity
280 /* Same thing as the "round" code except the msb of the leftover
281 fraction is bit 0 of a10, with the rest of the fraction in a9. */
284 beqz xl, .Ladd_roundcarry
285 beqz a9, .Ladd_exactlyhalf
289 /* Clear the mantissa. */
294 /* The sign bit may have been lost in a carry-out. Put it back. */
300 /* Round down to the nearest even value. */
306 /* xl is always zero when the rounding increment overflows, so
307 there's no need to round it to an even value. */
309 /* Overflow to the exponent is OK. */
316 /* Handle NaNs and Infinities. (This code is placed before the
317 start of the function just to keep it in range of the limited
318 branch displacements.) */
321 /* If y is neither Infinity nor NaN, return x. */
323 /* Both x and y are either NaN or Inf, so the result is NaN. */
324 movi a4, 0x80000 /* make it a quiet NaN */
329 /* Negate y and return it. */
335 .Lsub_opposite_signs:
336 /* Operand signs differ. Do an addition. */
343 .type __subdf3, @function
348 /* Check if the two operands have the same sign. */
350 bltz a7, .Lsub_opposite_signs
353 /* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */
354 ball xh, a6, .Lsub_xnan_or_inf
355 ball yh, a6, .Lsub_ynan_or_inf
357 /* Compare the operands. In contrast to addition, the entire
358 value matters here. */
361 bltu xh, yh, .Lsub_xsmaller
362 beq xh, yh, .Lsub_compare_low
365 /* Check if the smaller (or equal) exponent is zero. */
366 bnone yh, a6, .Lsub_yexpzero
368 /* Replace yh sign/exponent with 0x001. */
374 /* Compute the exponent difference. Optimize for difference < 32. */
376 bgeui a10, 32, .Lsub_bigshifty
378 /* Shift yh/yl right by the exponent difference. Any bits that are
379 shifted out of yl are saved in a9 for rounding the result. */
387 /* Do the 64-bit subtraction. */
393 /* Subtract the leftover bits in a9 from zero and propagate any
394 borrow from xh/xl. */
401 /* Check if the subtract underflowed into the exponent. */
402 extui a10, xh, 20, 11
403 beq a10, a7, .Lsub_round
407 /* The high words are equal. Compare the low words. */
408 bltu xl, yl, .Lsub_xsmaller
409 bltu yl, xl, .Lsub_ysmaller
410 /* The operands are equal. Return 0.0. */
416 /* y is a subnormal value. Replace its sign/exponent with zero,
417 i.e., no implicit "1.0". Unless x is also a subnormal, increment
418 y's apparent exponent because subnormals behave as if they had
419 the minimum (nonzero) exponent. */
422 bnone xh, a6, .Lsub_yexpdiff
427 /* Exponent difference > 64 -- just return the bigger value. */
430 /* Shift yh/yl right by the exponent difference. Any bits that are
431 shifted out are saved in a9 for rounding the result. */
433 sll a11, yl /* lost bits shifted out of yl */
438 or a9, a9, a10 /* any positive, nonzero value will work */
442 /* Same thing as the "ysmaller" code, but with x and y swapped and
    the sign of the result inverted. */
444 bnone xh, a6, .Lsub_xexpzero
452 bgeui a10, 32, .Lsub_bigshiftx
470 /* Subtract the leftover bits in a9 from zero and propagate any
471 borrow from xh/xl. */
478 /* Check if the subtract underflowed into the exponent. */
479 extui a10, xh, 20, 11
480 bne a10, a8, .Lsub_borrow
483 /* Round up if the leftover fraction is >= 1/2. */
486 beqz xl, .Lsub_roundcarry
488 /* Check if the leftover fraction is exactly 1/2. */
490 beqz a9, .Lsub_exactlyhalf
494 /* Same as "yexpzero". */
497 bnone yh, a6, .Lsub_xexpdiff
502 /* Mostly the same thing as "bigshifty", but with the sign bit of the
503 shifted value set so that the subsequent subtraction flips the
    sign of the result. */
505 bgeui a10, 64, .Lsub_returny
511 slli xh, a6, 11 /* set sign bit of xh */
517 /* Negate and return y. */
524 /* The subtraction has underflowed into the exponent field, so the
525 value needs to be renormalized. Shift the mantissa left as
526 needed to remove any leading zeros and adjust the exponent
527 accordingly. If the exponent is not large enough to remove
528 all the leading zeros, the result will be a subnormal value. */
531 beqz a8, .Lsub_xhzero
532 do_nsau a6, a8, a7, a11
534 bge a6, a10, .Lsub_subnormal
538 /* Shift the mantissa (a8/xl/a9) left by a6. */
544 /* Combine the shifted mantissa with the sign and exponent,
545 decrementing the exponent by a6. (The exponent has already
546 been decremented by one due to the borrow from the subtraction,
547 but adding the mantissa will increment the exponent by one.) */
555 /* Round down to the nearest even value. */
561 /* xl is always zero when the rounding increment overflows, so
562 there's no need to round it to an even value. */
564 /* Overflow to the exponent is OK. */
568 /* When normalizing the result, all the mantissa bits in the high
569 word are zero. Shift by "20 + (leading zero count of xl) + 1". */
570 do_nsau a6, xl, a7, a11
572 blt a10, a6, .Lsub_subnormal
574 .Lsub_normalize_shift:
575 bltui a6, 32, .Lsub_shift_lt32
589 /* The exponent is too small to shift away all the leading zeros.
590 Set a6 to the current exponent (which has already been
591 decremented by the borrow) so that the exponent of the result
592 will be zero. Do not add 1 to a6 in this case, because: (1)
593 adding the mantissa will not increment the exponent, so there is
594 no need to subtract anything extra from the exponent to
595 compensate, and (2) the effective exponent of a subnormal is 1
596 not 0 so the shift amount must be 1 smaller than normal. */
598 j .Lsub_normalize_shift
600 #endif /* L_addsubdf3 */
605 #if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
606 #define XCHAL_NO_MUL 1
612 /* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
613 (This code is placed before the start of the function just to
614 keep it in range of the limited branch displacements.) */
617 /* Clear the sign bit of x. */
621 /* If x is zero, return zero. */
623 beqz a10, .Lmul_return_zero
625 /* Normalize x. Adjust the exponent in a8. */
626 beqz xh, .Lmul_xh_zero
627 do_nsau a10, xh, a11, a12
636 do_nsau a10, xl, a11, a12
641 bltz a10, .Lmul_xl_srl
651 /* Clear the sign bit of y. */
655 /* If y is zero, return zero. */
657 beqz a10, .Lmul_return_zero
659 /* Normalize y. Adjust the exponent in a9. */
660 beqz yh, .Lmul_yh_zero
661 do_nsau a10, yh, a11, a12
670 do_nsau a10, yl, a11, a12
675 bltz a10, .Lmul_yl_srl
685 /* Return zero with the appropriate sign bit. */
692 /* If y is zero, return NaN. */
696 movi a4, 0x80000 /* make it a quiet NaN */
700 /* If y is NaN, return y. */
701 bnall yh, a6, .Lmul_returnx
704 beqz a8, .Lmul_returnx
711 /* Set the sign bit and return. */
719 /* If x is zero, return NaN. */
720 bnez xl, .Lmul_returny
722 bnez a8, .Lmul_returny
723 movi a7, 0x80000 /* make it a quiet NaN */
729 .type __muldf3, @function
731 #if __XTENSA_CALL0_ABI__
739 /* This is not really a leaf function; allocate enough stack space
740 to allow CALL12s to a helper function. */
747 /* Get the sign of the result. */
750 /* Check for NaN and infinity. */
751 ball xh, a6, .Lmul_xnan_or_inf
752 ball yh, a6, .Lmul_ynan_or_inf
754 /* Extract the exponents. */
758 beqz a8, .Lmul_xexpzero
760 beqz a9, .Lmul_yexpzero
763 /* Add the exponents. */
766 /* Replace sign/exponent fields with explicit "1.0". */
773 /* Multiply 64x64 to 128 bits. The result ends up in xh/xl/a6.
774 The least-significant word of the result is thrown away except
775 that if it is nonzero, the lsb of a6 is set to 1. */
776 #if XCHAL_HAVE_MUL32_HIGH
778 /* Compute a6 with any carry-outs in a10. */
791 /* If the low word of the result is nonzero, set the lsb of a6. */
797 /* Compute xl with any carry-outs in a9. */
818 #else /* ! XCHAL_HAVE_MUL32_HIGH */
820 /* Break the inputs into 16-bit chunks and compute 16 32-bit partial
821 products. These partial products are:
846 where the input chunks are (hh, hl, lh, ll). If using the Mul16
847 or Mul32 multiplier options, these input chunks must be stored in
848 separate registers. For Mac16, the UMUL.AA.* opcodes can specify
849 that the inputs come from either half of the registers, so there
850 is no need to shift them out ahead of time. If there is no
851 multiply hardware, the 16-bit chunks can be extracted when setting
852 up the arguments to the separate multiply function. */
854 /* Save a7 since it is needed to hold a temporary value. */
856 #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
857 /* Calling a separate multiply function will clobber a0 and requires
858 use of a8 as a temporary, so save those values now. (The function
859 uses a custom ABI so nothing else needs to be saved.) */
864 #if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
871 /* Get the high halves of the inputs into registers. */
882 #if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
883 /* Clear the high halves of the inputs. This does not matter
884 for MUL16 because the high bits are ignored. */
890 #endif /* MUL16 || MUL32 */
895 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
896 mul16u dst, xreg ## xhalf, yreg ## yhalf
898 #elif XCHAL_HAVE_MUL32
900 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
901 mull dst, xreg ## xhalf, yreg ## yhalf
903 #elif XCHAL_HAVE_MAC16
905 /* The preprocessor insists on inserting a space when concatenating after
906 a period in the definition of do_mul below. These macros are a workaround
907 using underscores instead of periods when doing the concatenation. */
908 #define umul_aa_ll umul.aa.ll
909 #define umul_aa_lh umul.aa.lh
910 #define umul_aa_hl umul.aa.hl
911 #define umul_aa_hh umul.aa.hh
913 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
914 umul_aa_ ## xhalf ## yhalf xreg, yreg; \
917 #else /* no multiply hardware */
919 #define set_arg_l(dst, src) \
920 extui dst, src, 0, 16
921 #define set_arg_h(dst, src) \
924 #if __XTENSA_CALL0_ABI__
925 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
926 set_arg_ ## xhalf (a13, xreg); \
927 set_arg_ ## yhalf (a14, yreg); \
928 call0 .Lmul_mulsi3; \
931 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
932 set_arg_ ## xhalf (a14, xreg); \
933 set_arg_ ## yhalf (a15, yreg); \
934 call12 .Lmul_mulsi3; \
936 #endif /* __XTENSA_CALL0_ABI__ */
938 #endif /* no multiply hardware */
940 /* Add pp1 and pp2 into a10 with carry-out in a9. */
941 do_mul(a10, xl, l, yl, h) /* pp 1 */
942 do_mul(a11, xl, h, yl, l) /* pp 2 */
948 /* Initialize a6 with a9/a10 shifted into position. Note that
949 this value can be safely incremented without any carry-outs. */
953 /* Compute the low word into a10. */
954 do_mul(a11, xl, l, yl, l) /* pp 0 */
960 /* Compute the contributions of pp0-5 to a6, with carry-outs in a9.
961 This is good enough to determine the low half of a6, so that any
962 nonzero bits from the low word of the result can be collapsed
963 into a6, freeing up a register. */
965 do_mul(a11, xl, l, yh, l) /* pp 3 */
970 do_mul(a11, xl, h, yl, h) /* pp 4 */
975 do_mul(a11, xh, l, yl, l) /* pp 5 */
980 /* Collapse any nonzero bits from the low word into a6. */
985 /* Add pp6-9 into a11 with carry-outs in a10. */
986 do_mul(a7, xl, l, yh, h) /* pp 6 */
987 do_mul(a11, xh, h, yl, l) /* pp 9 */
993 do_mul(a7, xl, h, yh, l) /* pp 7 */
998 do_mul(a7, xh, l, yl, h) /* pp 8 */
1003 /* Shift a10/a11 into position, and add low half of a11 to a6. */
1011 /* Add pp10-12 into xl with carry-outs in a9. */
1013 do_mul(xl, xl, h, yh, h) /* pp 10 */
1018 do_mul(a10, xh, l, yh, l) /* pp 11 */
1023 do_mul(a10, xh, h, yl, h) /* pp 12 */
1028 /* Add pp13-14 into a11 with carry-outs in a10. */
1029 do_mul(a11, xh, l, yh, h) /* pp 13 */
1030 do_mul(a7, xh, h, yh, l) /* pp 14 */
1036 /* Shift a10/a11 into position, and add low half of a11 to a6. */
1045 do_mul(xh, xh, h, yh, h) /* pp 15 */
1048 /* Restore values saved on the stack during the multiplication. */
1050 #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
1054 #endif /* ! XCHAL_HAVE_MUL32_HIGH */
1056 /* Shift left by 12 bits, unless there was a carry-out from the
1057 multiply, in which case, shift by 11 bits and increment the
1058 exponent. Note: It is convenient to use the constant 0x3ff
1059 instead of 0x400 when removing the extra exponent bias (so that
1060 it is easy to construct 0x7fe for the overflow check). Reverse
1061 the logic here to decrement the exponent sum by one unless there
     was a carry-out. */
1064 srli a5, xh, 21 - 12
1073 /* Subtract the extra bias from the exponent sum (plus one to account
1074 for the explicit "1.0" of the mantissa that will be added to the
1075 exponent in the final result). */
1079 /* Check for over/underflow. The value in a8 is one less than the
1080 final exponent, so values in the range 0..7fd are OK here. */
1081 slli a4, a4, 1 /* 0x7fe */
1082 bgeu a8, a4, .Lmul_overflow
1086 bgez a6, .Lmul_rounded
1088 beqz xl, .Lmul_roundcarry
1090 beqz a6, .Lmul_exactlyhalf
1093 /* Add the exponent to the mantissa. */
1098 /* Add the sign bit. */
1104 #if __XTENSA_CALL0_ABI__
1114 /* Round down to the nearest even value. */
1120 /* xl is always zero when the rounding increment overflows, so
1121 there's no need to round it to an even value. */
1123 /* Overflow is OK -- it will be added to the exponent. */
1127 bltz a8, .Lmul_underflow
1128 /* Return +/- Infinity. */
1129 addi a8, a4, 1 /* 0x7ff */
1135 /* Create a subnormal value, where the exponent field contains zero,
1136 but the effective exponent is 1. The value of a8 is one less than
1137 the actual exponent, so just negate it to get the shift amount. */
1141 bgeui a8, 32, .Lmul_bigshift
1143 /* Shift xh/xl right. Any bits that are shifted out of xl are saved
1144 in a6 (combined with the shifted-out bits currently in a6) for
1145 rounding the result. */
1152 bgeui a8, 64, .Lmul_flush_to_zero
1153 sll a10, xl /* lost bits shifted out of xl */
1159 /* Set the exponent to zero. */
1162 /* Pack any nonzero bits shifted out into a6. */
1163 beqz a9, .Lmul_round
1168 .Lmul_flush_to_zero:
1169 /* Return zero with the appropriate sign bit. */
1177 /* For Xtensa processors with no multiply hardware, this simplified
1178 version of _mulsi3 is used for multiplying 16-bit chunks of
1179 the floating-point mantissas. When using CALL0, this function
1180 uses a custom ABI: the inputs are passed in a13 and a14, the
1181 result is returned in a12, and a8 and a15 are clobbered. */
1185 .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
1187 1: add \tmp1, \src2, \dst
1188 extui \tmp2, \src1, 0, 1
1189 movnez \dst, \tmp1, \tmp2
1191 do_addx2 \tmp1, \src2, \dst, \tmp1
1192 extui \tmp2, \src1, 1, 1
1193 movnez \dst, \tmp1, \tmp2
1195 do_addx4 \tmp1, \src2, \dst, \tmp1
1196 extui \tmp2, \src1, 2, 1
1197 movnez \dst, \tmp1, \tmp2
1199 do_addx8 \tmp1, \src2, \dst, \tmp1
1200 extui \tmp2, \src1, 3, 1
1201 movnez \dst, \tmp1, \tmp2
1203 srli \src1, \src1, 4
1204 slli \src2, \src2, 4
1207 #if __XTENSA_CALL0_ABI__
1208 mul_mulsi3_body a12, a13, a14, a15, a8
1210 /* The result will be written into a2, so save that argument in a4. */
1212 mul_mulsi3_body a2, a4, a3, a5, a6
1215 #endif /* XCHAL_NO_MUL */
1216 #endif /* L_muldf3 */
1222 #if XCHAL_HAVE_DFP_DIV
1227 .type __divdf3, @function
1274 /* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
1275 (This code is placed before the start of the function just to
1276 keep it in range of the limited branch displacements.) */
1279 /* Clear the sign bit of y. */
1283 /* Check for division by zero. */
1285 beqz a10, .Ldiv_yzero
1287 /* Normalize y. Adjust the exponent in a9. */
1288 beqz yh, .Ldiv_yh_zero
1289 do_nsau a10, yh, a11, a9
1298 do_nsau a10, yl, a11, a9
1303 bltz a10, .Ldiv_yl_srl
1313 /* y is zero. Return NaN if x is also zero; otherwise, infinity. */
1321 movi a4, 0x80000 /* make it a quiet NaN */
1327 /* Clear the sign bit of x. */
1331 /* If x is zero, return zero. */
1333 beqz a10, .Ldiv_return_zero
1335 /* Normalize x. Adjust the exponent in a8. */
1336 beqz xh, .Ldiv_xh_zero
1337 do_nsau a10, xh, a11, a8
1346 do_nsau a10, xl, a11, a8
1351 bltz a10, .Ldiv_xl_srl
1361 /* Return zero with the appropriate sign bit. */
1368 /* Set the sign bit of the result. */
1372 /* If y is NaN or Inf, return NaN. */
1374 movi a4, 0x80000 /* make it a quiet NaN */
1379 /* If y is Infinity, return zero. */
1382 beqz a8, .Ldiv_return_zero
1383 /* y is NaN; return it. */
1394 .type __divdf3, @function
1399 /* Get the sign of the result. */
1402 /* Check for NaN and infinity. */
1403 ball xh, a6, .Ldiv_xnan_or_inf
1404 ball yh, a6, .Ldiv_ynan_or_inf
1406 /* Extract the exponents. */
1407 extui a8, xh, 20, 11
1408 extui a9, yh, 20, 11
1410 beqz a9, .Ldiv_yexpzero
1412 beqz a8, .Ldiv_xexpzero
1415 /* Subtract the exponents. */
1418 /* Replace sign/exponent fields with explicit "1.0". */
1425 /* Set SAR for left shift by one. */
1428 /* The first digit of the mantissa division must be a one.
1429 Shift x (and adjust the exponent) as needed to make this true. */
1431 beq yh, xh, .Ldiv_highequal1
1436 /* Do the first subtraction and shift. */
1444 /* Put the quotient into a10/a11. */
1448 /* Divide one bit at a time for 52 bits. */
1450 #if XCHAL_HAVE_LOOPS
1451 loop a9, .Ldiv_loopend
1454 /* Shift the quotient << 1. */
1458 /* Is this digit a 0 or 1? */
1460 beq xh, yh, .Ldiv_highequal2
1462 /* Output a 1 and subtract. */
1469 /* Shift the dividend << 1. */
1473 #if !XCHAL_HAVE_LOOPS
1479 /* Add the exponent bias (less one to account for the explicit "1.0"
1480 of the mantissa that will be added to the exponent in the final
1485 /* Check for over/underflow. The value in a8 is one less than the
1486 final exponent, so values in the range 0..7fd are OK here. */
1487 addmi a9, a9, 0x400 /* 0x7fe */
1488 bgeu a8, a9, .Ldiv_overflow
1491 /* Round. The remainder (<< 1) is in xh/xl. */
1492 bltu xh, yh, .Ldiv_rounded
1493 beq xh, yh, .Ldiv_highequal3
1496 beqz a11, .Ldiv_roundcarry
1500 /* Add the exponent to the mantissa. */
1505 /* Add the sign bit. */
1516 bltu xl, yl, .Ldiv_rounded
1517 bne xl, yl, .Ldiv_roundup
1519 /* Remainder is exactly half the divisor. Round even. */
1521 beqz a11, .Ldiv_roundcarry
1527 bltz a8, .Ldiv_underflow
1528 /* Return +/- Infinity. */
1529 addi a8, a9, 1 /* 0x7ff */
1535 /* Create a subnormal value, where the exponent field contains zero,
1536 but the effective exponent is 1. The value of a8 is one less than
1537 the actual exponent, so just negate it to get the shift amount. */
1540 bgeui a8, 32, .Ldiv_bigshift
1542 /* Shift a10/a11 right. Any bits that are shifted out of a11 are
1543 saved in a6 for rounding the result. */
1550 bgeui a8, 64, .Ldiv_flush_to_zero
1551 sll a9, a11 /* lost bits shifted out of a11 */
1557 /* Set the exponent to zero. */
1560 /* Pack any nonzero remainder (in xh/xl) into a6. */
1566 /* Round a10/a11 based on the bits shifted out into a6. */
1567 1: bgez a6, .Ldiv_rounded
1569 beqz a11, .Ldiv_roundcarry
1571 bnez a6, .Ldiv_rounded
1577 /* a11 is always zero when the rounding increment overflows, so
1578 there's no need to round it to an even value. */
1580 /* Overflow to the exponent field is OK. */
1583 .Ldiv_flush_to_zero:
1584 /* Return zero with the appropriate sign bit. */
1590 #endif /* XCHAL_HAVE_DFP_DIV */
1592 #endif /* L_divdf3 */
1596 /* Equal and Not Equal */
1601 .set __nedf2, __eqdf2
1602 .type __eqdf2, @function
1608 /* The values are equal but NaN != NaN. Check the exponent. */
1620 /* Check if the mantissas are nonzero. */
1625 /* Check if x and y are zero with different signs. */
1628 or a7, a7, xl /* xl == yl here */
1630 /* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa
1631 of x when exponent(x) = 0x7ff and x == y. */
1642 .type __gtdf2, @function
1647 1: bnall yh, a6, .Lle_cmp
1649 /* Check if y is a NaN. */
1656 /* Check if x is a NaN. */
1664 /* Less Than or Equal */
1668 .type __ledf2, @function
1673 1: bnall yh, a6, .Lle_cmp
1675 /* Check if y is a NaN. */
1682 /* Check if x is a NaN. */
1690 /* Check if x and y have different signs. */
1692 bltz a7, .Lle_diff_signs
1694 /* Check if x is negative. */
1697 /* Check if x <= y. */
1705 /* Check if y <= x. */
1715 /* Check if both x and y are zero. */
1726 /* Greater Than or Equal */
1730 .type __gedf2, @function
1735 1: bnall yh, a6, .Llt_cmp
1737 /* Check if y is a NaN. */
1744 /* Check if x is a NaN. */
1756 .type __ltdf2, @function
1761 1: bnall yh, a6, .Llt_cmp
1763 /* Check if y is a NaN. */
1770 /* Check if x is a NaN. */
1778 /* Check if x and y have different signs. */
1780 bltz a7, .Llt_diff_signs
1782 /* Check if x is negative. */
1785 /* Check if x < y. */
1793 /* Check if y < x. */
1803 /* Check if both x and y are nonzero. */
1818 .type __unorddf2, @function
1839 #endif /* L_cmpdf2 */
1845 .type __fixdfsi, @function
1849 /* Check for NaN and Infinity. */
1851 ball xh, a6, .Lfixdfsi_nan_or_inf
1853 /* Extract the exponent and check if 0 < (exp - 0x3fe) < 32. */
1854 extui a4, xh, 20, 11
1855 extui a5, a6, 19, 10 /* 0x3fe */
1857 bgei a4, 32, .Lfixdfsi_maxint
1858 blti a4, 1, .Lfixdfsi_zero
1860 /* Add explicit "1.0" and shift << 11. */
1865 /* Shift back to the right, based on the exponent. */
1866 ssl a4 /* shift by 32 - a4 */
1869 /* Negate the result if sign != 0. */
1874 .Lfixdfsi_nan_or_inf:
1875 /* Handle Infinity and NaN. */
1878 beqz a4, .Lfixdfsi_maxint
1880 /* Translate NaN to +maxint. */
1884 slli a4, a6, 11 /* 0x80000000 */
1885 addi a5, a4, -1 /* 0x7fffffff */
1894 #endif /* L_fixdfsi */
1900 .type __fixdfdi, @function
1904 /* Check for NaN and Infinity. */
1906 ball xh, a6, .Lfixdfdi_nan_or_inf
1908 /* Extract the exponent and check if 0 < (exp - 0x3fe) < 64. */
1909 extui a4, xh, 20, 11
1910 extui a5, a6, 19, 10 /* 0x3fe */
1912 bgei a4, 64, .Lfixdfdi_maxint
1913 blti a4, 1, .Lfixdfdi_zero
1915 /* Add explicit "1.0" and shift << 11. */
1921 /* Shift back to the right, based on the exponent. */
1922 ssl a4 /* shift by 64 - a4 */
1923 bgei a4, 32, .Lfixdfdi_smallshift
1928 /* Negate the result if sign != 0. */
1936 .Lfixdfdi_smallshift:
1941 .Lfixdfdi_nan_or_inf:
1942 /* Handle Infinity and NaN. */
1945 beqz a4, .Lfixdfdi_maxint
1947 /* Translate NaN to +maxint. */
1951 slli a7, a6, 11 /* 0x80000000 */
1957 1: addi xh, a7, -1 /* 0x7fffffff */
1966 #endif /* L_fixdfdi */
1971 .global __fixunsdfsi
1972 .type __fixunsdfsi, @function
1976 /* Check for NaN and Infinity. */
1978 ball xh, a6, .Lfixunsdfsi_nan_or_inf
1980 /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 32. */
1981 extui a4, xh, 20, 11
1982 extui a5, a6, 20, 10 /* 0x3ff */
1984 bgei a4, 32, .Lfixunsdfsi_maxint
1985 bltz a4, .Lfixunsdfsi_zero
1987 /* Add explicit "1.0" and shift << 11. */
1992 /* Shift back to the right, based on the exponent. */
1994 beqi a4, 32, .Lfixunsdfsi_bigexp
1995 ssl a4 /* shift by 32 - a4 */
1998 /* Negate the result if sign != 0. */
2003 .Lfixunsdfsi_nan_or_inf:
2004 /* Handle Infinity and NaN. */
2007 beqz a4, .Lfixunsdfsi_maxint
2009 /* Translate NaN to 0xffffffff. */
2013 .Lfixunsdfsi_maxint:
2014 slli a4, a6, 11 /* 0x80000000 */
2015 movi a5, -1 /* 0xffffffff */
2024 .Lfixunsdfsi_bigexp:
2025 /* Handle unsigned maximum exponent case. */
2027 mov a2, a5 /* no shift needed */
2030 /* Return 0x80000000 if negative. */
2034 #endif /* L_fixunsdfsi */
2039 .global __fixunsdfdi
2040 .type __fixunsdfdi, @function
2044 /* Check for NaN and Infinity. */
2046 ball xh, a6, .Lfixunsdfdi_nan_or_inf
2048 /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 64. */
2049 extui a4, xh, 20, 11
2050 extui a5, a6, 20, 10 /* 0x3ff */
2052 bgei a4, 64, .Lfixunsdfdi_maxint
2053 bltz a4, .Lfixunsdfdi_zero
2055 /* Add explicit "1.0" and shift << 11. */
2061 /* Shift back to the right, based on the exponent. */
2063 beqi a4, 64, .Lfixunsdfdi_bigexp
2064 ssl a4 /* shift by 64 - a4 */
2065 bgei a4, 32, .Lfixunsdfdi_smallshift
2069 .Lfixunsdfdi_shifted:
2070 /* Negate the result if sign != 0. */
2078 .Lfixunsdfdi_smallshift:
2081 j .Lfixunsdfdi_shifted
2083 .Lfixunsdfdi_nan_or_inf:
2084 /* Handle Infinity and NaN. */
2087 beqz a4, .Lfixunsdfdi_maxint
2089 /* Translate NaN to 0xffffffff.... */
2094 .Lfixunsdfdi_maxint:
2096 2: slli xh, a6, 11 /* 0x80000000 */
2105 .Lfixunsdfdi_bigexp:
2106 /* Handle unsigned maximum exponent case. */
2108 leaf_return /* no shift needed */
2110 #endif /* L_fixunsdfdi */
2115 .global __floatunsidf
2116 .type __floatunsidf, @function
2119 beqz a2, .Lfloatsidf_return_zero
2121 /* Set the sign to zero and jump to the floatsidf code. */
2123 j .Lfloatsidf_normalize
2127 .type __floatsidf, @function
2131 /* Check for zero. */
2132 beqz a2, .Lfloatsidf_return_zero
2134 /* Save the sign. */
2137 /* Get the absolute value. */
2145 .Lfloatsidf_normalize:
2146 /* Normalize with the first 1 bit in the msb. */
2147 do_nsau a4, a2, a5, a6
2151 /* Shift the mantissa into position. */
2153 slli xl, a5, (32 - 11)
2155 /* Set the exponent. */
2156 movi a5, 0x41d /* 0x3fe + 31 */
2161 /* Add the sign and return. */
2166 .Lfloatsidf_return_zero:
2170 #endif /* L_floatsidf */
2175 .global __floatundidf
2176 .type __floatundidf, @function
2180 /* Check for zero. */
2184 /* Set the sign to zero and jump to the floatdidf code. */
2186 j .Lfloatdidf_normalize
2190 .type __floatdidf, @function
2194 /* Check for zero. */
2198 /* Save the sign. */
2201 /* Get the absolute value. */
2202 bgez xh, .Lfloatdidf_normalize
2205 beqz xl, .Lfloatdidf_normalize
2208 .Lfloatdidf_normalize:
2209 /* Normalize with the first 1 bit in the msb of xh. */
2210 beqz xh, .Lfloatdidf_bigshift
2211 do_nsau a4, xh, a5, a6
2216 .Lfloatdidf_shifted:
2217 /* Shift the mantissa into position, with rounding bits in a6. */
2223 /* Set the exponent. */
2224 movi a5, 0x43d /* 0x3fe + 63 */
2233 /* Round up if the leftover fraction is >= 1/2. */
2236 beqz xl, .Lfloatdidf_roundcarry
2238 /* Check if the leftover fraction is exactly 1/2. */
2240 beqz a6, .Lfloatdidf_exactlyhalf
2243 .Lfloatdidf_bigshift:
2244 /* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
2245 do_nsau a4, xl, a5, a6
2250 j .Lfloatdidf_shifted
2252 .Lfloatdidf_exactlyhalf:
2253 /* Round down to the nearest even value. */
2258 .Lfloatdidf_roundcarry:
2259 /* xl is always zero when the rounding increment overflows, so
2260 there's no need to round it to an even value. */
2262 /* Overflow to the exponent is OK. */
2265 #endif /* L_floatdidf */
2270 .global __truncdfsf2
2271 .type __truncdfsf2, @function
2275 /* Adjust the exponent bias. */
2276 movi a4, (0x3ff - 0x7f) << 20
2279 /* Check for underflow. */
2281 bltz a6, .Ltrunc_underflow
2282 extui a6, a5, 20, 11
2283 beqz a6, .Ltrunc_underflow
2285 /* Check for overflow. */
2287 bge a6, a4, .Ltrunc_overflow
2289 /* Shift a5/xl << 3 into a5/a4. */
2295 /* Add the sign bit. */
2300 /* Round up if the leftover fraction is >= 1/2. */
2303 /* Overflow to the exponent is OK. The answer will be correct. */
2305 /* Check if the leftover fraction is exactly 1/2. */
2307 beqz a4, .Ltrunc_exactlyhalf
2310 .Ltrunc_exactlyhalf:
2311 /* Round down to the nearest even value. */
2317 /* Check if exponent == 0x7ff. */
2321 /* Check if mantissa is nonzero. */
2326 /* Shift a4 to set a bit in the mantissa, making a quiet NaN. */
2329 1: slli a4, a4, 4 /* 0xff000000 or 0xff800000 */
2330 /* Add the sign bit. */
2337 /* Find shift count for a subnormal. Flush to zero if >= 32. */
2338 extui a6, xh, 20, 11
2339 movi a5, 0x3ff - 0x7f
2344 /* Replace the exponent with an explicit "1.0". */
2345 slli a5, a5, 13 /* 0x700000 */
2350 /* Shift the mantissa left by 3 bits (into a5/a4). */
2355 /* Shift right by a6. */
2360 beqz a7, .Ltrunc_addsign
2361 or a4, a4, a6 /* any positive, nonzero value will work */
2364 /* Return +/- zero. */
2365 1: extui a2, xh, 31, 1
2369 #endif /* L_truncdfsf2 */
2371 #ifdef L_extendsfdf2
2374 .global __extendsfdf2
2375 .type __extendsfdf2, @function
2379 /* Save the sign bit and then shift it off. */
2384 /* Extract and check the exponent. */
2386 beqz a6, .Lextend_expzero
2388 beqi a6, 256, .Lextend_nan_or_inf
2390 /* Shift >> 3 into a4/xl. */
2392 slli xl, a2, (32 - 3)
2394 /* Adjust the exponent bias. */
2395 movi a6, (0x3ff - 0x7f) << 20
2398 /* Add the sign bit. */
2402 .Lextend_nan_or_inf:
2405 /* Check for NaN. */
2409 slli a6, a6, 11 /* 0x80000 */
2412 /* Add the sign and return. */
2420 /* Normalize it to have 8 zero bits before the first 1 bit. */
2421 do_nsau a7, a4, a2, a3
2426 /* Shift >> 3 into a4/xl. */
2427 slli xl, a4, (32 - 3)
2430 /* Set the exponent. */
2431 movi a6, 0x3fe - 0x7f
2436 /* Add the sign and return. */
2440 #endif /* L_extendsfdf2 */
2443 #if XCHAL_HAVE_DFP_SQRT
2448 .global __ieee754_sqrt
2449 .type __ieee754_sqrt, @function
2496 #endif /* XCHAL_HAVE_DFP_SQRT */
2498 #if XCHAL_HAVE_DFP_RECIP
2504 .type __recipdf2, @function
2527 #endif /* L_recipdf2 */
2528 #endif /* XCHAL_HAVE_DFP_RECIP */
2530 #if XCHAL_HAVE_DFP_RSQRT
2532 /* Reciprocal square root */
2536 .type __rsqrtdf2, @function
2565 #endif /* L_rsqrtdf2 */
2566 #endif /* XCHAL_HAVE_DFP_RSQRT */