1 /* IEEE-754 double-precision functions for Xtensa
2 Copyright (C) 2006, 2007 Free Software Foundation, Inc.
3 Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 In addition to the permissions in the GNU General Public License,
13 the Free Software Foundation gives you unlimited permission to link
14 the compiled version of this file into combinations with other
15 programs, and to distribute those combinations without any
16 restriction coming from the use of this file. (The General Public
17 License restrictions do apply in other respects; for example, they
18 cover modification of the file, and distribution when not linked
19 into a combined executable.)
21 GCC is distributed in the hope that it will be useful, but WITHOUT
22 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
23 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
24 License for more details.
26 You should have received a copy of the GNU General Public License
27 along with GCC; see the file COPYING. If not, write to the Free
28 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
43 /* Warning! The branch displacements for some Xtensa branch instructions
44 are quite small, and this code has been carefully laid out to keep
45 branch targets in range. If you change anything, be sure to check that
46 the assembler is not relaxing anything to branch over a jump. */
52 .type __negdf2, @function
66 /* Handle NaNs and Infinities. (This code is placed before the
67 start of the function just to keep it in range of the limited
68 branch displacements.) */
71 /* If y is neither Infinity nor NaN, return x. */
73 /* If x is a NaN, return it. Otherwise, return y. */
76 beqz a7, .Ladd_ynan_or_inf
86 /* Operand signs differ. Do a subtraction. */
93 .type __adddf3, @function
98 /* Check if the two operands have the same sign. */
100 bltz a7, .Ladd_opposite_signs
103 /* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */
104 ball xh, a6, .Ladd_xnan_or_inf
105 ball yh, a6, .Ladd_ynan_or_inf
107 /* Compare the exponents. The smaller operand will be shifted
108 right by the exponent difference and added to the larger
112 bltu a7, a8, .Ladd_shiftx
115 /* Check if the smaller (or equal) exponent is zero. */
116 bnone yh, a6, .Ladd_yexpzero
118 /* Replace yh sign/exponent with 0x001. */
124 /* Compute the exponent difference. Optimize for difference < 32. */
126 bgeui a10, 32, .Ladd_bigshifty
128 /* Shift yh/yl right by the exponent difference. Any bits that are
129 shifted out of yl are saved in a9 for rounding the result. */
137 /* Do the 64-bit addition. */
143 /* Check if the add overflowed into the exponent. */
144 extui a10, xh, 20, 12
145 beq a10, a7, .Ladd_round
150 /* y is a subnormal value. Replace its sign/exponent with zero,
151 i.e., no implicit "1.0", and increment the apparent exponent
152 because subnormals behave as if they had the minimum (nonzero)
153 exponent. Test for the case when both exponents are zero. */
156 bnone xh, a6, .Ladd_bothexpzero
161 /* Both exponents are zero. Handle this as a special case. There
162 is no need to shift or round, and the normal code for handling
163 a carry into the exponent field will not work because it
164 assumes there is an implicit "1.0" that needs to be added. */
172 /* Exponent difference > 64 -- just return the bigger value. */
175 /* Shift yh/yl right by the exponent difference. Any bits that are
176 shifted out are saved in a9 for rounding the result. */
178 sll a11, yl /* lost bits shifted out of yl */
183 or a9, a9, a10 /* any positive, nonzero value will work */
187 /* Same as "yexpzero" except skip handling the case when both
188 exponents are zero. */
195 /* Same thing as the "shifty" code, but with x and y swapped. Also,
196 because the exponent difference is always nonzero in this version,
197 the shift sequence can use SLL and skip loading a constant zero. */
198 bnone xh, a6, .Ladd_xexpzero
206 bgeui a10, 32, .Ladd_bigshiftx
219 /* Check if the add overflowed into the exponent. */
220 extui a10, xh, 20, 12
221 bne a10, a8, .Ladd_carry
224 /* Round up if the leftover fraction is >= 1/2. */
227 beqz xl, .Ladd_roundcarry
229 /* Check if the leftover fraction is exactly 1/2. */
231 beqz a9, .Ladd_exactlyhalf
235 /* Mostly the same thing as "bigshifty".... */
236 bgeui a10, 64, .Ladd_returny
253 /* The addition has overflowed into the exponent field, so the
254 value needs to be renormalized. The mantissa of the result
255 can be recovered by subtracting the original exponent and
256 adding 0x100000 (which is the explicit "1.0" for the
257 mantissa of the non-shifted operand -- the "1.0" for the
258 shifted operand was already added). The mantissa can then
259 be shifted right by one bit. The explicit "1.0" of the
260 shifted mantissa then needs to be replaced by the exponent,
261 incremented by one to account for the normalizing shift.
262 It is faster to combine these operations: do the shift first
263 and combine the additions and subtractions. If x is the
264 original exponent, the result is:
265 shifted mantissa - (x << 19) + (1 << 19) + (x << 20)
267 which simplifies to: shifted mantissa + ((x + 1) << 19)
268 Note that the exponent is incremented here by leaving the
269 explicit "1.0" of the mantissa in the exponent field. */
271 /* Shift xh/xl right by one bit. Save the lsb of xl. */
277 /* See explanation above. The original exponent is in a8. */
282 /* Return an Infinity if the exponent overflowed. */
283 ball xh, a6, .Ladd_infinity
285 /* Same thing as the "round" code except the msb of the leftover
286 fraction is bit 0 of a10, with the rest of the fraction in a9. */
289 beqz xl, .Ladd_roundcarry
290 beqz a9, .Ladd_exactlyhalf
294 /* Clear the mantissa. */
299 /* The sign bit may have been lost in a carry-out. Put it back. */
305 /* Round down to the nearest even value. */
311 /* xl is always zero when the rounding increment overflows, so
312 there's no need to round it to an even value. */
314 /* Overflow to the exponent is OK. */
321 /* Handle NaNs and Infinities. (This code is placed before the
322 start of the function just to keep it in range of the limited
323 branch displacements.) */
326 /* If y is neither Infinity nor NaN, return x. */
328 /* Both x and y are either NaN or Inf, so the result is NaN. */
329 movi a4, 0x80000 /* make it a quiet NaN */
334 /* Negate y and return it. */
340 .Lsub_opposite_signs:
341 /* Operand signs differ. Do an addition. */
348 .type __subdf3, @function
353 /* Check if the two operands have the same sign. */
355 bltz a7, .Lsub_opposite_signs
358 /* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */
359 ball xh, a6, .Lsub_xnan_or_inf
360 ball yh, a6, .Lsub_ynan_or_inf
362 /* Compare the operands. In contrast to addition, the entire
363 value matters here. */
366 bltu xh, yh, .Lsub_xsmaller
367 beq xh, yh, .Lsub_compare_low
370 /* Check if the smaller (or equal) exponent is zero. */
371 bnone yh, a6, .Lsub_yexpzero
373 /* Replace yh sign/exponent with 0x001. */
379 /* Compute the exponent difference. Optimize for difference < 32. */
381 bgeui a10, 32, .Lsub_bigshifty
383 /* Shift yh/yl right by the exponent difference. Any bits that are
384 shifted out of yl are saved in a9 for rounding the result. */
392 /* Do the 64-bit subtraction. */
398 /* Subtract the leftover bits in a9 from zero and propagate any
399 borrow from xh/xl. */
406 /* Check if the subtract underflowed into the exponent. */
407 extui a10, xh, 20, 11
408 beq a10, a7, .Lsub_round
412 /* The high words are equal. Compare the low words. */
413 bltu xl, yl, .Lsub_xsmaller
414 bltu yl, xl, .Lsub_ysmaller
415 /* The operands are equal. Return 0.0. */
421 /* y is a subnormal value. Replace its sign/exponent with zero,
422 i.e., no implicit "1.0". Unless x is also a subnormal, increment
423 y's apparent exponent because subnormals behave as if they had
424 the minimum (nonzero) exponent. */
427 bnone xh, a6, .Lsub_yexpdiff
432 /* Exponent difference > 64 -- just return the bigger value. */
435 /* Shift yh/yl right by the exponent difference. Any bits that are
436 shifted out are saved in a9 for rounding the result. */
438 sll a11, yl /* lost bits shifted out of yl */
443 or a9, a9, a10 /* any positive, nonzero value will work */
447 /* Same thing as the "ysmaller" code, but with x and y swapped and
449 bnone xh, a6, .Lsub_xexpzero
457 bgeui a10, 32, .Lsub_bigshiftx
475 /* Subtract the leftover bits in a9 from zero and propagate any
476 borrow from xh/xl. */
483 /* Check if the subtract underflowed into the exponent. */
484 extui a10, xh, 20, 11
485 bne a10, a8, .Lsub_borrow
488 /* Round up if the leftover fraction is >= 1/2. */
491 beqz xl, .Lsub_roundcarry
493 /* Check if the leftover fraction is exactly 1/2. */
495 beqz a9, .Lsub_exactlyhalf
499 /* Same as "yexpzero". */
502 bnone yh, a6, .Lsub_xexpdiff
507 /* Mostly the same thing as "bigshifty", but with the sign bit of the
508 shifted value set so that the subsequent subtraction flips the
510 bgeui a10, 64, .Lsub_returny
516 slli xh, a6, 11 /* set sign bit of xh */
522 /* Negate and return y. */
529 /* The subtraction has underflowed into the exponent field, so the
530 value needs to be renormalized. Shift the mantissa left as
531 needed to remove any leading zeros and adjust the exponent
532 accordingly. If the exponent is not large enough to remove
533 all the leading zeros, the result will be a subnormal value. */
536 beqz a8, .Lsub_xhzero
537 do_nsau a6, a8, a7, a11
539 bge a6, a10, .Lsub_subnormal
543 /* Shift the mantissa (a8/xl/a9) left by a6. */
549 /* Combine the shifted mantissa with the sign and exponent,
550 decrementing the exponent by a6. (The exponent has already
551 been decremented by one due to the borrow from the subtraction,
552 but adding the mantissa will increment the exponent by one.) */
560 /* Round down to the nearest even value. */
566 /* xl is always zero when the rounding increment overflows, so
567 there's no need to round it to an even value. */
569 /* Overflow to the exponent is OK. */
573 /* When normalizing the result, all the mantissa bits in the high
574 word are zero. Shift by "20 + (leading zero count of xl) + 1". */
575 do_nsau a6, xl, a7, a11
577 blt a10, a6, .Lsub_subnormal
579 .Lsub_normalize_shift:
580 bltui a6, 32, .Lsub_shift_lt32
594 /* The exponent is too small to shift away all the leading zeros.
595 Set a6 to the current exponent (which has already been
596 decremented by the borrow) so that the exponent of the result
597 will be zero. Do not add 1 to a6 in this case, because: (1)
598 adding the mantissa will not increment the exponent, so there is
599 no need to subtract anything extra from the exponent to
600 compensate, and (2) the effective exponent of a subnormal is 1
601 not 0 so the shift amount must be 1 smaller than normal. */
603 j .Lsub_normalize_shift
605 #endif /* L_addsubdf3 */
610 #if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
611 #define XCHAL_NO_MUL 1
616 /* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
617 (This code is placed before the start of the function just to
618 keep it in range of the limited branch displacements.) */
621 /* Clear the sign bit of x. */
625 /* If x is zero, return zero. */
627 beqz a10, .Lmul_return_zero
629 /* Normalize x. Adjust the exponent in a8. */
630 beqz xh, .Lmul_xh_zero
631 do_nsau a10, xh, a11, a12
640 do_nsau a10, xl, a11, a12
645 bltz a10, .Lmul_xl_srl
655 /* Clear the sign bit of y. */
659 /* If y is zero, return zero. */
661 beqz a10, .Lmul_return_zero
663 /* Normalize y. Adjust the exponent in a9. */
664 beqz yh, .Lmul_yh_zero
665 do_nsau a10, yh, a11, a12
674 do_nsau a10, yl, a11, a12
679 bltz a10, .Lmul_yl_srl
689 /* Return zero with the appropriate sign bit. */
696 /* If y is zero, return NaN. */
700 movi a4, 0x80000 /* make it a quiet NaN */
704 /* If y is NaN, return y. */
705 bnall yh, a6, .Lmul_returnx
708 beqz a8, .Lmul_returnx
715 /* Set the sign bit and return. */
723 /* If x is zero, return NaN. */
724 bnez xl, .Lmul_returny
726 bnez a8, .Lmul_returny
727 movi a7, 0x80000 /* make it a quiet NaN */
733 .type __muldf3, @function
735 #if __XTENSA_CALL0_ABI__
743 /* This is not really a leaf function; allocate enough stack space
744 to allow CALL12s to a helper function. */
751 /* Get the sign of the result. */
754 /* Check for NaN and infinity. */
755 ball xh, a6, .Lmul_xnan_or_inf
756 ball yh, a6, .Lmul_ynan_or_inf
758 /* Extract the exponents. */
762 beqz a8, .Lmul_xexpzero
764 beqz a9, .Lmul_yexpzero
767 /* Add the exponents. */
770 /* Replace sign/exponent fields with explicit "1.0". */
777 /* Multiply 64x64 to 128 bits. The result ends up in xh/xl/a6.
778 The least-significant word of the result is thrown away except
779 that if it is nonzero, the lsb of a6 is set to 1. */
780 #if XCHAL_HAVE_MUL32_HIGH
782 /* Compute a6 with any carry-outs in a10. */
795 /* If the low word of the result is nonzero, set the lsb of a6. */
801 /* Compute xl with any carry-outs in a9. */
822 #else /* ! XCHAL_HAVE_MUL32_HIGH */
824 /* Break the inputs into 16-bit chunks and compute 16 32-bit partial
825 products. These partial products are:
850 where the input chunks are (hh, hl, lh, ll). If using the Mul16
851 or Mul32 multiplier options, these input chunks must be stored in
852 separate registers. For Mac16, the UMUL.AA.* opcodes can specify
853 that the inputs come from either half of the registers, so there
854 is no need to shift them out ahead of time. If there is no
855 multiply hardware, the 16-bit chunks can be extracted when setting
856 up the arguments to the separate multiply function. */
858 /* Save a7 since it is needed to hold a temporary value. */
860 #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
861 /* Calling a separate multiply function will clobber a0 and requires
862 use of a8 as a temporary, so save those values now. (The function
863 uses a custom ABI so nothing else needs to be saved.) */
868 #if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32
875 /* Get the high halves of the inputs into registers. */
886 #if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16
887 /* Clear the high halves of the inputs. This does not matter
888 for MUL16 because the high bits are ignored. */
894 #endif /* MUL16 || MUL32 */
899 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
900 mul16u dst, xreg ## xhalf, yreg ## yhalf
902 #elif XCHAL_HAVE_MUL32
904 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
905 mull dst, xreg ## xhalf, yreg ## yhalf
907 #elif XCHAL_HAVE_MAC16
909 /* The preprocessor insists on inserting a space when concatenating after
910 a period in the definition of do_mul below. These macros are a workaround
911 using underscores instead of periods when doing the concatenation. */
912 #define umul_aa_ll umul.aa.ll
913 #define umul_aa_lh umul.aa.lh
914 #define umul_aa_hl umul.aa.hl
915 #define umul_aa_hh umul.aa.hh
917 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
918 umul_aa_ ## xhalf ## yhalf xreg, yreg; \
921 #else /* no multiply hardware */
923 #define set_arg_l(dst, src) \
924 extui dst, src, 0, 16
925 #define set_arg_h(dst, src) \
928 #if __XTENSA_CALL0_ABI__
929 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
930 set_arg_ ## xhalf (a13, xreg); \
931 set_arg_ ## yhalf (a14, yreg); \
932 call0 .Lmul_mulsi3; \
935 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \
936 set_arg_ ## xhalf (a14, xreg); \
937 set_arg_ ## yhalf (a15, yreg); \
938 call12 .Lmul_mulsi3; \
940 #endif /* __XTENSA_CALL0_ABI__ */
942 #endif /* no multiply hardware */
944 /* Add pp1 and pp2 into a10 with carry-out in a9. */
945 do_mul(a10, xl, l, yl, h) /* pp 1 */
946 do_mul(a11, xl, h, yl, l) /* pp 2 */
952 /* Initialize a6 with a9/a10 shifted into position. Note that
953 this value can be safely incremented without any carry-outs. */
957 /* Compute the low word into a10. */
958 do_mul(a11, xl, l, yl, l) /* pp 0 */
964 /* Compute the contributions of pp0-5 to a6, with carry-outs in a9.
965 This is good enough to determine the low half of a6, so that any
966 nonzero bits from the low word of the result can be collapsed
967 into a6, freeing up a register. */
969 do_mul(a11, xl, l, yh, l) /* pp 3 */
974 do_mul(a11, xl, h, yl, h) /* pp 4 */
979 do_mul(a11, xh, l, yl, l) /* pp 5 */
984 /* Collapse any nonzero bits from the low word into a6. */
989 /* Add pp6-9 into a11 with carry-outs in a10. */
990 do_mul(a7, xl, l, yh, h) /* pp 6 */
991 do_mul(a11, xh, h, yl, l) /* pp 9 */
997 do_mul(a7, xl, h, yh, l) /* pp 7 */
1002 do_mul(a7, xh, l, yl, h) /* pp 8 */
1007 /* Shift a10/a11 into position, and add low half of a11 to a6. */
1015 /* Add pp10-12 into xl with carry-outs in a9. */
1017 do_mul(xl, xl, h, yh, h) /* pp 10 */
1022 do_mul(a10, xh, l, yh, l) /* pp 11 */
1027 do_mul(a10, xh, h, yl, h) /* pp 12 */
1032 /* Add pp13-14 into a11 with carry-outs in a10. */
1033 do_mul(a11, xh, l, yh, h) /* pp 13 */
1034 do_mul(a7, xh, h, yh, l) /* pp 14 */
1040 /* Shift a10/a11 into position, and add low half of a11 to a6. */
1049 do_mul(xh, xh, h, yh, h) /* pp 15 */
1052 /* Restore values saved on the stack during the multiplication. */
1054 #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL
1058 #endif /* ! XCHAL_HAVE_MUL32_HIGH */
1060 /* Shift left by 12 bits, unless there was a carry-out from the
1061 multiply, in which case, shift by 11 bits and increment the
1062 exponent. Note: It is convenient to use the constant 0x3ff
1063 instead of 0x400 when removing the extra exponent bias (so that
1064 it is easy to construct 0x7fe for the overflow check). Reverse
1065 the logic here to decrement the exponent sum by one unless there
1068 srli a5, xh, 21 - 12
1077 /* Subtract the extra bias from the exponent sum (plus one to account
1078 for the explicit "1.0" of the mantissa that will be added to the
1079 exponent in the final result). */
1083 /* Check for over/underflow. The value in a8 is one less than the
1084 final exponent, so values in the range 0..7fd are OK here. */
1085 slli a4, a4, 1 /* 0x7fe */
1086 bgeu a8, a4, .Lmul_overflow
1090 bgez a6, .Lmul_rounded
1092 beqz xl, .Lmul_roundcarry
1094 beqz a6, .Lmul_exactlyhalf
1097 /* Add the exponent to the mantissa. */
1102 /* Add the sign bit. */
1108 #if __XTENSA_CALL0_ABI__
1118 /* Round down to the nearest even value. */
1124 /* xl is always zero when the rounding increment overflows, so
1125 there's no need to round it to an even value. */
1127 /* Overflow is OK -- it will be added to the exponent. */
1131 bltz a8, .Lmul_underflow
1132 /* Return +/- Infinity. */
1133 addi a8, a4, 1 /* 0x7ff */
1139 /* Create a subnormal value, where the exponent field contains zero,
1140 but the effective exponent is 1. The value of a8 is one less than
1141 the actual exponent, so just negate it to get the shift amount. */
1145 bgeui a8, 32, .Lmul_bigshift
1147 /* Shift xh/xl right. Any bits that are shifted out of xl are saved
1148 in a6 (combined with the shifted-out bits currently in a6) for
1149 rounding the result. */
1156 bgeui a8, 64, .Lmul_flush_to_zero
1157 sll a10, xl /* lost bits shifted out of xl */
1163 /* Set the exponent to zero. */
1166 /* Pack any nonzero bits shifted out into a6. */
1167 beqz a9, .Lmul_round
1172 .Lmul_flush_to_zero:
1173 /* Return zero with the appropriate sign bit. */
1181 /* For Xtensa processors with no multiply hardware, this simplified
1182 version of _mulsi3 is used for multiplying 16-bit chunks of
1183 the floating-point mantissas. When using CALL0, this function
1184 uses a custom ABI: the inputs are passed in a13 and a14, the
1185 result is returned in a12, and a8 and a15 are clobbered. */
1189 .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2
1191 1: add \tmp1, \src2, \dst
1192 extui \tmp2, \src1, 0, 1
1193 movnez \dst, \tmp1, \tmp2
1195 do_addx2 \tmp1, \src2, \dst, \tmp1
1196 extui \tmp2, \src1, 1, 1
1197 movnez \dst, \tmp1, \tmp2
1199 do_addx4 \tmp1, \src2, \dst, \tmp1
1200 extui \tmp2, \src1, 2, 1
1201 movnez \dst, \tmp1, \tmp2
1203 do_addx8 \tmp1, \src2, \dst, \tmp1
1204 extui \tmp2, \src1, 3, 1
1205 movnez \dst, \tmp1, \tmp2
1207 srli \src1, \src1, 4
1208 slli \src2, \src2, 4
1211 #if __XTENSA_CALL0_ABI__
1212 mul_mulsi3_body a12, a13, a14, a15, a8
1214 /* The result will be written into a2, so save that argument in a4. */
1216 mul_mulsi3_body a2, a4, a3, a5, a6
1219 #endif /* XCHAL_NO_MUL */
1220 #endif /* L_muldf3 */
1227 /* Handle unusual cases (zeros, subnormals, NaNs and Infinities).
1228 (This code is placed before the start of the function just to
1229 keep it in range of the limited branch displacements.) */
1232 /* Clear the sign bit of y. */
1236 /* Check for division by zero. */
1238 beqz a10, .Ldiv_yzero
1240 /* Normalize y. Adjust the exponent in a9. */
1241 beqz yh, .Ldiv_yh_zero
1242 do_nsau a10, yh, a11, a9
1251 do_nsau a10, yl, a11, a9
1256 bltz a10, .Ldiv_yl_srl
1266 /* y is zero. Return NaN if x is also zero; otherwise, infinity. */
1274 movi a4, 0x80000 /* make it a quiet NaN */
1280 /* Clear the sign bit of x. */
1284 /* If x is zero, return zero. */
1286 beqz a10, .Ldiv_return_zero
1288 /* Normalize x. Adjust the exponent in a8. */
1289 beqz xh, .Ldiv_xh_zero
1290 do_nsau a10, xh, a11, a8
1299 do_nsau a10, xl, a11, a8
1304 bltz a10, .Ldiv_xl_srl
1314 /* Return zero with the appropriate sign bit. */
1321 /* Set the sign bit of the result. */
1325 /* If y is NaN or Inf, return NaN. */
1327 movi a4, 0x80000 /* make it a quiet NaN */
1332 /* If y is Infinity, return zero. */
1335 beqz a8, .Ldiv_return_zero
1336 /* y is NaN; return it. */
1347 .type __divdf3, @function
1352 /* Get the sign of the result. */
1355 /* Check for NaN and infinity. */
1356 ball xh, a6, .Ldiv_xnan_or_inf
1357 ball yh, a6, .Ldiv_ynan_or_inf
1359 /* Extract the exponents. */
1360 extui a8, xh, 20, 11
1361 extui a9, yh, 20, 11
1363 beqz a9, .Ldiv_yexpzero
1365 beqz a8, .Ldiv_xexpzero
1368 /* Subtract the exponents. */
1371 /* Replace sign/exponent fields with explicit "1.0". */
1378 /* Set SAR for left shift by one. */
1381 /* The first digit of the mantissa division must be a one.
1382 Shift x (and adjust the exponent) as needed to make this true. */
1384 beq yh, xh, .Ldiv_highequal1
1389 /* Do the first subtraction and shift. */
1397 /* Put the quotient into a10/a11. */
1401 /* Divide one bit at a time for 52 bits. */
1403 #if XCHAL_HAVE_LOOPS
1404 loop a9, .Ldiv_loopend
1407 /* Shift the quotient << 1. */
1411 /* Is this digit a 0 or 1? */
1413 beq xh, yh, .Ldiv_highequal2
1415 /* Output a 1 and subtract. */
1422 /* Shift the dividend << 1. */
1426 #if !XCHAL_HAVE_LOOPS
1432 /* Add the exponent bias (less one to account for the explicit "1.0"
1433 of the mantissa that will be added to the exponent in the final
1438 /* Check for over/underflow. The value in a8 is one less than the
1439 final exponent, so values in the range 0..7fd are OK here. */
1440 addmi a9, a9, 0x400 /* 0x7fe */
1441 bgeu a8, a9, .Ldiv_overflow
1444 /* Round. The remainder (<< 1) is in xh/xl. */
1445 bltu xh, yh, .Ldiv_rounded
1446 beq xh, yh, .Ldiv_highequal3
1449 beqz a11, .Ldiv_roundcarry
1453 /* Add the exponent to the mantissa. */
1458 /* Add the sign bit. */
1469 bltu xl, yl, .Ldiv_rounded
1470 bne xl, yl, .Ldiv_roundup
1472 /* Remainder is exactly half the divisor. Round even. */
1474 beqz a11, .Ldiv_roundcarry
1480 bltz a8, .Ldiv_underflow
1481 /* Return +/- Infinity. */
1482 addi a8, a9, 1 /* 0x7ff */
1488 /* Create a subnormal value, where the exponent field contains zero,
1489 but the effective exponent is 1. The value of a8 is one less than
1490 the actual exponent, so just negate it to get the shift amount. */
1493 bgeui a8, 32, .Ldiv_bigshift
1495 /* Shift a10/a11 right. Any bits that are shifted out of a11 are
1496 saved in a6 for rounding the result. */
1503 bgeui a8, 64, .Ldiv_flush_to_zero
1504 sll a9, a11 /* lost bits shifted out of a11 */
1510 /* Set the exponent to zero. */
1513 /* Pack any nonzero remainder (in xh/xl) into a6. */
1519 /* Round a10/a11 based on the bits shifted out into a6. */
1520 1: bgez a6, .Ldiv_rounded
1522 beqz a11, .Ldiv_roundcarry
1524 bnez a6, .Ldiv_rounded
1530 /* a11 is always zero when the rounding increment overflows, so
1531 there's no need to round it to an even value. */
1533 /* Overflow to the exponent field is OK. */
1536 .Ldiv_flush_to_zero:
1537 /* Return zero with the appropriate sign bit. */
1543 #endif /* L_divdf3 */
1547 /* Equal and Not Equal */
1552 .set __nedf2, __eqdf2
1553 .type __eqdf2, @function
1559 /* The values are equal but NaN != NaN. Check the exponent. */
1571 /* Check if the mantissas are nonzero. */
1576 /* Check if x and y are zero with different signs. */
1579 or a7, a7, xl /* xl == yl here */
1581 /* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa
1582 of x when exponent(x) = 0x7ff and x == y. */
1593 .type __gtdf2, @function
1598 1: bnall yh, a6, .Lle_cmp
1600 /* Check if y is a NaN. */
1607 /* Check if x is a NaN. */
1615 /* Less Than or Equal */
1619 .type __ledf2, @function
1624 1: bnall yh, a6, .Lle_cmp
1626 /* Check if y is a NaN. */
1633 /* Check if x is a NaN. */
1641 /* Check if x and y have different signs. */
1643 bltz a7, .Lle_diff_signs
1645 /* Check if x is negative. */
1648 /* Check if x <= y. */
1656 /* Check if y <= x. */
1666 /* Check if both x and y are zero. */
1677 /* Greater Than or Equal */
1681 .type __gedf2, @function
1686 1: bnall yh, a6, .Llt_cmp
1688 /* Check if y is a NaN. */
1695 /* Check if x is a NaN. */
1707 .type __ltdf2, @function
1712 1: bnall yh, a6, .Llt_cmp
1714 /* Check if y is a NaN. */
1721 /* Check if x is a NaN. */
1729 /* Check if x and y have different signs. */
1731 bltz a7, .Llt_diff_signs
1733 /* Check if x is negative. */
1736 /* Check if x < y. */
1744 /* Check if y < x. */
1754 /* Check if both x and y are nonzero. */
1769 .type __unorddf2, @function
1790 #endif /* L_cmpdf2 */
1796 .type __fixdfsi, @function
1800 /* Check for NaN and Infinity. */
1802 ball xh, a6, .Lfixdfsi_nan_or_inf
1804 /* Extract the exponent and check if 0 < (exp - 0x3fe) < 32. */
1805 extui a4, xh, 20, 11
1806 extui a5, a6, 19, 10 /* 0x3fe */
1808 bgei a4, 32, .Lfixdfsi_maxint
1809 blti a4, 1, .Lfixdfsi_zero
1811 /* Add explicit "1.0" and shift << 11. */
1816 /* Shift back to the right, based on the exponent. */
1817 ssl a4 /* shift by 32 - a4 */
1820 /* Negate the result if sign != 0. */
1825 .Lfixdfsi_nan_or_inf:
1826 /* Handle Infinity and NaN. */
1829 beqz a4, .Lfixdfsi_maxint
1831 /* Translate NaN to +maxint. */
1835 slli a4, a6, 11 /* 0x80000000 */
1836 addi a5, a4, -1 /* 0x7fffffff */
1845 #endif /* L_fixdfsi */
1851 .type __fixdfdi, @function
1855 /* Check for NaN and Infinity. */
1857 ball xh, a6, .Lfixdfdi_nan_or_inf
1859 /* Extract the exponent and check if 0 < (exp - 0x3fe) < 64. */
1860 extui a4, xh, 20, 11
1861 extui a5, a6, 19, 10 /* 0x3fe */
1863 bgei a4, 64, .Lfixdfdi_maxint
1864 blti a4, 1, .Lfixdfdi_zero
1866 /* Add explicit "1.0" and shift << 11. */
1872 /* Shift back to the right, based on the exponent. */
1873 ssl a4 /* shift by 64 - a4 */
1874 bgei a4, 32, .Lfixdfdi_smallshift
1879 /* Negate the result if sign != 0. */
1887 .Lfixdfdi_smallshift:
1892 .Lfixdfdi_nan_or_inf:
1893 /* Handle Infinity and NaN. */
1896 beqz a4, .Lfixdfdi_maxint
1898 /* Translate NaN to +maxint. */
1902 slli a7, a6, 11 /* 0x80000000 */
1908 1: addi xh, a7, -1 /* 0x7fffffff */
1917 #endif /* L_fixdfdi */
1922 .global __fixunsdfsi
1923 .type __fixunsdfsi, @function
1927 /* Check for NaN and Infinity. */
1929 ball xh, a6, .Lfixunsdfsi_nan_or_inf
1931 /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 32. */
1932 extui a4, xh, 20, 11
1933 extui a5, a6, 20, 10 /* 0x3ff */
1935 bgei a4, 32, .Lfixunsdfsi_maxint
1936 bltz a4, .Lfixunsdfsi_zero
1938 /* Add explicit "1.0" and shift << 11. */
1943 /* Shift back to the right, based on the exponent. */
1945 beqi a4, 32, .Lfixunsdfsi_bigexp
1946 ssl a4 /* shift by 32 - a4 */
1949 /* Negate the result if sign != 0. */
1954 .Lfixunsdfsi_nan_or_inf:
1955 /* Handle Infinity and NaN. */
1958 beqz a4, .Lfixunsdfsi_maxint
1960 /* Translate NaN to 0xffffffff. */
1964 .Lfixunsdfsi_maxint:
1965 slli a4, a6, 11 /* 0x80000000 */
1966 movi a5, -1 /* 0xffffffff */
1975 .Lfixunsdfsi_bigexp:
1976 /* Handle unsigned maximum exponent case. */
1978 mov a2, a5 /* no shift needed */
1981 /* Return 0x80000000 if negative. */
1985 #endif /* L_fixunsdfsi */
1990 .global __fixunsdfdi
1991 .type __fixunsdfdi, @function
1995 /* Check for NaN and Infinity. */
1997 ball xh, a6, .Lfixunsdfdi_nan_or_inf
1999 /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 64. */
2000 extui a4, xh, 20, 11
2001 extui a5, a6, 20, 10 /* 0x3ff */
2003 bgei a4, 64, .Lfixunsdfdi_maxint
2004 bltz a4, .Lfixunsdfdi_zero
2006 /* Add explicit "1.0" and shift << 11. */
2012 /* Shift back to the right, based on the exponent. */
2014 beqi a4, 64, .Lfixunsdfdi_bigexp
2015 ssl a4 /* shift by 64 - a4 */
2016 bgei a4, 32, .Lfixunsdfdi_smallshift
2020 .Lfixunsdfdi_shifted:
2021 /* Negate the result if sign != 0. */
2029 .Lfixunsdfdi_smallshift:
2032 j .Lfixunsdfdi_shifted
2034 .Lfixunsdfdi_nan_or_inf:
2035 /* Handle Infinity and NaN. */
2038 beqz a4, .Lfixunsdfdi_maxint
2040 /* Translate NaN to 0xffffffff.... */
2045 .Lfixunsdfdi_maxint:
2047 2: slli xh, a6, 11 /* 0x80000000 */
2056 .Lfixunsdfdi_bigexp:
2057 /* Handle unsigned maximum exponent case. */
2059 leaf_return /* no shift needed */
2061 #endif /* L_fixunsdfdi */
2066 .global __floatunsidf
2067 .type __floatunsidf, @function
2070 beqz a2, .Lfloatsidf_return_zero
2072 /* Set the sign to zero and jump to the floatsidf code. */
2074 j .Lfloatsidf_normalize
2078 .type __floatsidf, @function
2082 /* Check for zero. */
2083 beqz a2, .Lfloatsidf_return_zero
2085 /* Save the sign. */
2088 /* Get the absolute value. */
2096 .Lfloatsidf_normalize:
2097 /* Normalize with the first 1 bit in the msb. */
2098 do_nsau a4, a2, a5, a6
2102 /* Shift the mantissa into position. */
2104 slli xl, a5, (32 - 11)
2106 /* Set the exponent. */
2107 movi a5, 0x41d /* 0x3fe + 31 */
2112 /* Add the sign and return. */
2117 .Lfloatsidf_return_zero:
2121 #endif /* L_floatsidf */
2126 .global __floatundidf
2127 .type __floatundidf, @function
2131 /* Check for zero. */
2135 /* Set the sign to zero and jump to the floatdidf code. */
2137 j .Lfloatdidf_normalize
2141 .type __floatdidf, @function
2145 /* Check for zero. */
2149 /* Save the sign. */
2152 /* Get the absolute value. */
2153 bgez xh, .Lfloatdidf_normalize
2156 beqz xl, .Lfloatdidf_normalize
2159 .Lfloatdidf_normalize:
2160 /* Normalize with the first 1 bit in the msb of xh. */
2161 beqz xh, .Lfloatdidf_bigshift
2162 do_nsau a4, xh, a5, a6
2167 .Lfloatdidf_shifted:
2168 /* Shift the mantissa into position, with rounding bits in a6. */
2174 /* Set the exponent. */
2175 movi a5, 0x43d /* 0x3fe + 63 */
2184 /* Round up if the leftover fraction is >= 1/2. */
2187 beqz xl, .Lfloatdidf_roundcarry
2189 /* Check if the leftover fraction is exactly 1/2. */
2191 beqz a6, .Lfloatdidf_exactlyhalf
2194 .Lfloatdidf_bigshift:
2195 /* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
2196 do_nsau a4, xl, a5, a6
2201 j .Lfloatdidf_shifted
2203 .Lfloatdidf_exactlyhalf:
2204 /* Round down to the nearest even value. */
2209 .Lfloatdidf_roundcarry:
2210 /* xl is always zero when the rounding increment overflows, so
2211 there's no need to round it to an even value. */
2213 /* Overflow to the exponent is OK. */
2216 #endif /* L_floatdidf */
2221 .global __truncdfsf2
2222 .type __truncdfsf2, @function
2226 /* Adjust the exponent bias. */
2227 movi a4, (0x3ff - 0x7f) << 20
2230 /* Check for underflow. */
2232 bltz a6, .Ltrunc_underflow
2233 extui a6, a5, 20, 11
2234 beqz a6, .Ltrunc_underflow
2236 /* Check for overflow. */
2238 bge a6, a4, .Ltrunc_overflow
2240 /* Shift a5/xl << 3 into a5/a4. */
2246 /* Add the sign bit. */
2251 /* Round up if the leftover fraction is >= 1/2. */
2254 /* Overflow to the exponent is OK. The answer will be correct. */
2256 /* Check if the leftover fraction is exactly 1/2. */
2258 beqz a4, .Ltrunc_exactlyhalf
2261 .Ltrunc_exactlyhalf:
2262 /* Round down to the nearest even value. */
2268 /* Check if exponent == 0x7ff. */
2272 /* Check if mantissa is nonzero. */
2277 /* Shift a4 to set a bit in the mantissa, making a quiet NaN. */
2280 1: slli a4, a4, 4 /* 0xff000000 or 0xff800000 */
2281 /* Add the sign bit. */
2288 /* Find shift count for a subnormal. Flush to zero if >= 32. */
2289 extui a6, xh, 20, 11
2290 movi a5, 0x3ff - 0x7f
2295 /* Replace the exponent with an explicit "1.0". */
2296 slli a5, a5, 13 /* 0x700000 */
2301 /* Shift the mantissa left by 3 bits (into a5/a4). */
2306 /* Shift right by a6. */
2311 beqz a7, .Ltrunc_addsign
2312 or a4, a4, a6 /* any positive, nonzero value will work */
2315 /* Return +/- zero. */
2316 1: extui a2, xh, 31, 1
2320 #endif /* L_truncdfsf2 */
2322 #ifdef L_extendsfdf2
2325 .global __extendsfdf2
2326 .type __extendsfdf2, @function
2330 /* Save the sign bit and then shift it off. */
2335 /* Extract and check the exponent. */
2337 beqz a6, .Lextend_expzero
2339 beqi a6, 256, .Lextend_nan_or_inf
2341 /* Shift >> 3 into a4/xl. */
2343 slli xl, a2, (32 - 3)
2345 /* Adjust the exponent bias. */
2346 movi a6, (0x3ff - 0x7f) << 20
2349 /* Add the sign bit. */
2353 .Lextend_nan_or_inf:
2356 /* Check for NaN. */
2360 slli a6, a6, 11 /* 0x80000 */
2363 /* Add the sign and return. */
2371 /* Normalize it to have 8 zero bits before the first 1 bit. */
2372 do_nsau a7, a4, a2, a3
2377 /* Shift >> 3 into a4/xl. */
2378 slli xl, a4, (32 - 3)
2381 /* Set the exponent. */
2382 movi a6, 0x3fe - 0x7f
2387 /* Add the sign and return. */
2391 #endif /* L_extendsfdf2 */