1 /* mculib libgcc routines of Andes NDS32 cpu for GNU compiler
2 Copyright (C) 2012-2017 Free Software Foundation, Inc.
3 Contributed by Andes Technology Corporation.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 .section .mdebug.abi_nds32
30 /* ------------------------------------------- */
31 /* FPBIT floating point operations for libgcc */
32 /* ------------------------------------------- */
/* __subsf3/__addsf3: software single-precision FP subtract/add for
   libgcc (guarded by L_addsub_sf).  __subsf3 shares the tail of
   __addsf3.  NOTE(review): this chunk shows only a sampled subset of
   the routine's lines; comments are limited to what is visible.  */
39 .type __subsf3, @function
50 .type __addsf3, @function
78 sltsi $r15, $r8, #0x20 ! $r15 <- (shift count < 32) ?
92 sltsi $r15, $r8, #0x20 ! $r15 <- (shift count < 32) ?
119 subri $r15, $r5, #0xff ! compare exponent with 0xff (SFmode Inf/NaN encoding)
136 #ifdef __NDS32_PERF_EXT__
177 slti $r15, $r2, #0x20
179 move $r9, #0x80000000 ! sign-bit mask / implicit-one position
181 subri $r9, $r2, #0x20 ! $r9 <- 32 - $r2
198 #ifdef __NDS32_PERF_EXT__
211 subri $r15, $r7, #0xff ! exponent == 0xff (Inf/NaN) ?
213 move $r9, #0x80000000
214 bne $r1, $r9, .LEretB
218 subri $r15, $r7, #0xff
229 #ifdef __NDS32_PERF_EXT__
257 move $r0, #0xffc00000 ! return default quiet NaN bit pattern for SFmode
259 .size __subsf3, .-__subsf3
260 .size __addsf3, .-__addsf3
261 #endif /* L_addsub_sf */
/* __fixsfsi: convert single-precision float to signed 32-bit integer
   (guarded by L_sf_to_si).  NOTE(review): sampled subset of the
   original lines; out-of-range/NaN paths are only partially visible.  */
270 .type __fixsfsi, @function
275 move $r3, #0x80000000
279 subri $r2, $r3, #0x9e ! 0x9e = 127 (bias) + 31 — presumably the largest exponent that fits in s32; TODO confirm
281 sltsi $r15, $r2, #0x20 ! shift count < 32 ?
298 move $r3, #0x7f800000 ! SFmode exponent mask (Inf when mantissa == 0)
301 move $r0, #0x80000000 ! presumably saturate to INT_MIN on negative overflow
304 move $r0, #0x7fffffff ! presumably saturate to INT_MAX on positive overflow
306 .size __fixsfsi, .-__fixsfsi
307 #endif /* L_sf_to_si */
/* __divsi3: 32-bit signed division, $r0 = $r0 / $r1, implemented by
   taking absolute values and running a shift-subtract loop
   (udivmodsi4-style), then fixing the sign.  NOTE(review): sampled
   subset of the original lines.  */
316 .type __divsi3, @function
318 ! ---------------------------------------------------------------------
324 ! ---------------------------------------------------------------------
325 sltsi $r5, $r0, 0 ! $r5 <- neg = (a < 0) ? 1 : 0
326 subri $r4, $r0, 0 ! $r4 <- a = -a
327 cmovn $r0, $r4, $r5 ! $r0 <- a = neg ? -a : a
329 ! ---------------------------------------------------------------------
331 ! ---------------------------------------------------------------------
332 bgez $r1, .L3 ! if b >= 0, skip
333 ! ---------------------------------------------------------------------
337 ! ---------------------------------------------------------------------
338 subri $r1, $r1, 0 ! $r1 <- b = -b
339 subri $r5, $r5, 1 ! $r5 <- neg = !neg
341 ! ---------------------------------------------------------------------
342 !!res = udivmodsi4 (a, b, 1);
345 ! ---------------------------------------------------------------------
346 movi $r2, 0 ! $r2 <- res = 0
347 beqz $r1, .L1 ! if den == 0, skip
348 ! ---------------------------------------------------------------------
350 ! ---------------------------------------------------------------------
351 movi $r4, 1 ! $r4 <- bit = 1
352 #ifndef __OPTIMIZE_SIZE__
355 ! ---------------------------------------------------------------------
356 ! while (den < num && bit && !(den & (1L << 31)))
357 ! ---------------------------------------------------------------------
358 slt $ta, $r1, $r0 ! $ta <- den < num ?
359 beqz $ta, .L5 ! if no, skip
360 ! ---------------------------------------------------------------------
364 ! ---------------------------------------------------------------------
365 #if defined (__OPTIMIZE_SIZE__) && !defined (__NDS32_ISA_V3M__)
366 clz $r3, $r1 ! $r3 <- leading zero count for den
367 clz $ta, $r0 ! $ta <- leading zero count for num
368 sub $r3, $r3, $ta ! $r3 <- number of bits to shift
369 sll $r1, $r1, $r3 ! $r1 <- den
370 sll $r4, $r4, $r3 ! $r4 <- bit
372 slli $r1, $r1, 1 ! $r1 <- den << = 1
373 slli $r4, $r4, 1 ! $r4 <- bit << = 1
374 b .L6 ! continue loop
377 ! ---------------------------------------------------------------------
380 ! ---------------------------------------------------------------------
381 slt $ta, $r0, $r1 ! $ta <- num < den ?
382 bnez $ta, .L9 ! if yes, skip
383 ! ---------------------------------------------------------------------
387 ! ---------------------------------------------------------------------
388 sub $r0, $r0, $r1 ! $r0 <- num -= den
389 or $r2, $r2, $r4 ! $r2 <- res |= bit
391 ! ---------------------------------------------------------------------
398 ! ---------------------------------------------------------------------
399 srli $r4, $r4, 1 ! $r4 <- bit >> = 1
400 srli $r1, $r1, 1 ! $r1 <- den >> = 1
401 bnez $r4, .L5 ! if bit != 0, continue loop
403 ! ---------------------------------------------------------------------
407 ! ---------------------------------------------------------------------
408 subri $r0, $r2, 0 ! $r0 <- -res
409 cmovz $r0, $r2, $r5 ! $r0 <- neg ? -res : res
410 ! ---------------------------------------------------------------------
412 .size __divsi3, .-__divsi3
413 #endif /* L_divsi3 */
419 !--------------------------------------
420 #ifdef __big_endian__
431 !--------------------------------------
/* __divdi3: 64-bit signed division (guarded by L_divdi3).
   NOTE(review): sampled subset — the body between the register
   save/restore pairs is not visible here.  */
435 .type __divdi3, @function
438 #ifdef __NDS32_ISA_V3M__
441 smw.adm $r6, [$sp], $r10, 2 ! V3M: save $r6-$r10 (+extra per enable4 bits) on the stack
473 #ifdef __NDS32_ISA_V3M__
476 lmw.bim $r6, [$sp], $r10, 2 ! V3M: restore the registers saved above
479 .size __divdi3, .-__divdi3
480 #endif /* L_divdi3 */
/* __modsi3: 32-bit signed remainder, $r0 = $r0 % $r1, via
   absolute-value shift-subtract loop (udivmodsi4 with modwanted),
   result sign follows the dividend.  NOTE(review): sampled subset of
   the original lines.  */
489 .type __modsi3, @function
491 ! ---------------------------------------------------------------------
497 ! ---------------------------------------------------------------------
498 sltsi $r5, $r0, 0 ! $r5 <- neg = (a < 0) ? 1 : 0
499 subri $r4, $r0, 0 ! $r4 <- -a
500 cmovn $r0, $r4, $r5 ! $r0 <- |a|
501 ! ---------------------------------------------------------------------
503 #ifndef __NDS32_PERF_EXT__
504 ! ---------------------------------------------------------------------
505 bgez $r1, .L3 ! if b >= 0, skip
506 ! ---------------------------------------------------------------------
508 ! ---------------------------------------------------------------------
509 subri $r1, $r1, 0 ! $r1 <- |b|
511 ! ---------------------------------------------------------------------
512 !!res = udivmodsi4 (a, b, 1);
514 ! ---------------------------------------------------------------------
515 #else /* __NDS32_PERF_EXT__ */
517 !!res = udivmodsi4 (a, b, 1);
519 ! ---------------------------------------------------------------------
520 abs $r1, $r1 ! $r1 <- |b| (PERF extension: single instruction)
521 #endif /* __NDS32_PERF_EXT__ */
522 beqz $r1, .L1 ! if den == 0, skip
523 ! ---------------------------------------------------------------------
526 ! ---------------------------------------------------------------------
527 movi $r4, 1 ! $r4 <- bit = 1
528 #ifndef __OPTIMIZE_SIZE__
531 ! ---------------------------------------------------------------------
532 ! while (den < num && bit && !(den & (1L << 31)))
533 ! ---------------------------------------------------------------------
534 slt $ta, $r1, $r0 ! $ta <- den < num ?
535 beqz $ta, .L5 ! if no, skip
536 ! ---------------------------------------------------------------------
540 ! ---------------------------------------------------------------------
541 #if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
542 clz $r3, $r1 ! $r3 <- leading zero count for den
543 clz $ta, $r0 ! $ta <- leading zero count for num
544 sub $r3, $r3, $ta ! $r3 <- number of bits to shift
545 sll $r1, $r1, $r3 ! $r1 <- den
546 sll $r4, $r4, $r3 ! $r4 <- bit
548 slli $r1, $r1, 1 ! $r1 <- den << = 1
549 slli $r4, $r4, 1 ! $r4 <- bit << = 1
550 b .L6 ! continue loop
553 ! ---------------------------------------------------------------------
566 ! ---------------------------------------------------------------------
567 sub $r2, $r0, $r1 ! $r2 <- num - den
568 slt $ta, $r0, $r1 ! $ta <- num < den ?
569 srli $r4, $r4, 1 ! $r4 <- bit >> = 1
570 cmovz $r0, $r2, $ta ! $r0 <- num = (num < den) ? num : num - den
571 srli $r1, $r1, 1 ! $r1 <- den >> = 1
572 bnez $r4, .L5 ! if bit != 0, continue loop
574 ! ---------------------------------------------------------------------
578 ! ---------------------------------------------------------------------
579 subri $r3, $r0, 0 ! $r3 <- -res
580 cmovn $r0, $r3, $r5 ! $r0 <- neg ? -res : res
581 ! ---------------------------------------------------------------------
583 .size __modsi3, .-__modsi3
584 #endif /* L_modsi3 */
590 !--------------------------------------
591 #ifdef __big_endian__
602 !--------------------------------------
/* __moddi3: 64-bit signed remainder.  Negates operands to unsigned,
   calls __udivmoddi4 with a stack slot for the remainder, then
   restores the sign (c in $r10 records whether to negate).
   NOTE(review): sampled subset of the original lines.  */
606 .type __moddi3, @function
608 ! =====================================================================
610 ! sp+32 +-----------------------+
612 ! sp+28 +-----------------------+
614 ! sp+8 +-----------------------+
616 ! sp+4 +-----------------------+
618 ! sp +-----------------------+
619 ! =====================================================================
621 #ifdef __NDS32_ISA_V3M__
624 smw.adm $r6, [$sp], $r10, 2 ! V3M: save $r6-$r10 on the stack
628 !------------------------------------------
629 ! __moddi3 (DWtype u, DWtype v)
632 ! DWunion uu = {.ll = u};
633 ! DWunion vv = {.ll = v};
638 !---------------------------------------------
643 movi $r10, 0 ! r10 = c = 0
644 bgez V1H, .L80 ! if u >= 0 , go L80
648 movi $r10, -1 ! r10 = c = ~c
649 !------------------------------------------------
652 !----------------------------------------------
654 bgez $r7, .L81 ! if v >= 0 , go L81
660 !------------------------------------------
661 ! (void) __udivmoddi4 (uu.ll, vv.ll, &w);
665 !-----------------------------------------
673 lwi $r0, [$sp+(0)] ! le: sp + 0 is low, be: sp + 0 is high
674 lwi $r1, [$sp+(4)] ! le: sp + 4 is low, be: sp + 4 is high
679 #ifdef __NDS32_ISA_V3M__
683 lmw.bim $r6, [$sp], $r10, 2 ! V3M: restore saved registers
686 .size __moddi3, .-__moddi3
687 #endif /* L_moddi3 */
/* __mulsi3: 32-bit multiply, $r0 = $r0 * $r1, classic shift-and-add:
   for each set bit of a, accumulate the correspondingly shifted b.
   NOTE(review): sampled subset of the original lines.  */
696 .type __mulsi3, @function
698 ! ---------------------------------------------------------------------
704 ! ---------------------------------------------------------------------
705 beqz $r0, .L7 ! if a == 0, done
706 move $r2, $r0 ! $r2 <- a
707 movi $r0, 0 ! $r0 <- r <- 0
709 ! ---------------------------------------------------------------------
720 ! ---------------------------------------------------------------------
721 andi $r3, $r2, 1 ! $r3 <- a & 1
722 add $r4, $r0, $r1 ! $r4 <- r += b (speculative; committed only if bit set)
723 cmovn $r0, $r4, $r3 ! $r0 <- r
724 srli $r2, $r2, 1 ! $r2 <- a >> = 1
725 slli $r1, $r1, 1 ! $r1 <- b << = 1
726 bnez $r2, .L8 ! if a != 0, continue loop
728 ! ---------------------------------------------------------------------
730 ! ---------------------------------------------------------------------
732 .size __mulsi3, .-__mulsi3
733 #endif /* L_mulsi3 */
/* __udivsi3: 32-bit unsigned division, $r0 = $r0 / $r1, shift-subtract
   loop (udivmodsi4 with modwanted = 0).  NOTE(review): sampled subset
   of the original lines.  */
742 .type __udivsi3, @function
744 ! ---------------------------------------------------------------------
745 !!res=udivmodsi4(a,b,0);
748 ! ---------------------------------------------------------------------
749 movi $r2, 0 ! $r2 <- res=0
750 beqz $r1, .L1 ! if den==0, skip
751 ! ---------------------------------------------------------------------
753 ! ---------------------------------------------------------------------
754 movi $r4, 1 ! $r4 <- bit=1
755 #ifndef __OPTIMIZE_SIZE__
758 ! ---------------------------------------------------------------------
760 ! ---------------------------------------------------------------------
761 slt $ta, $r1, $r0 ! $ta <- den<num?
762 beqz $ta, .L5 ! if no, skip
763 ! ---------------------------------------------------------------------
764 ! &&bit&&!(den&(1L<<31)))
765 ! ---------------------------------------------------------------------
766 bltz $r1, .L5 ! if den<0, skip
767 ! ---------------------------------------------------------------------
771 ! ---------------------------------------------------------------------
772 #if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
773 clz $r3, $r1 ! $r3 <- leading zero count for den
774 clz $ta, $r0 ! $ta <- leading zero count for num
775 sub $r3, $r3, $ta ! $r3 <- number of bits to shift
776 sll $r1, $r1, $r3 ! $r1 <- den
777 sll $r2, $r2, $r3 ! $r2 <- bit  NOTE(review): sibling routines shift $r4 (bit) here, not $r2 (res) — verify against upstream
779 slli $r1, $r1, 1 ! $r1 <- den<<=1
780 slli $r4, $r4, 1 ! $r4 <- bit<<=1
781 b .L6 ! continue loop
784 ! ---------------------------------------------------------------------
787 ! ---------------------------------------------------------------------
788 slt $ta, $r0, $r1 ! $ta <- num<den?
789 bnez $ta, .L9 ! if yes, skip
790 ! ---------------------------------------------------------------------
794 ! ---------------------------------------------------------------------
795 sub $r0, $r0, $r1 ! $r0 <- num-=den
796 or $r2, $r2, $r4 ! $r2 <- res|=bit
798 ! ---------------------------------------------------------------------
806 ! ---------------------------------------------------------------------
807 srli $r4, $r4, 1 ! $r4 <- bit>>=1
808 srli $r1, $r1, 1 ! $r1 <- den>>=1
809 bnez $r4, .L5 ! if bit!=0, continue loop
811 ! ---------------------------------------------------------------------
813 ! ---------------------------------------------------------------------
814 move $r0, $r2 ! $r0 <- return value
815 ! ---------------------------------------------------------------------
816 ! ---------------------------------------------------------------------
818 .size __udivsi3, .-__udivsi3
819 #endif /* L_udivsi3 */
825 !--------------------------------------
826 #ifdef __big_endian__
837 !--------------------------------------
/* __udivdi3: 64-bit unsigned division (guarded by L_udivdi3).
   NOTE(review): sampled subset — the body between the register
   save/restore pairs is not visible here.  */
842 .type __udivdi3, @function
845 #ifdef __NDS32_ISA_V3M__
848 smw.adm $r6, [$sp], $r8, 2 ! V3M: save $r6-$r8 on the stack
854 #ifdef __NDS32_ISA_V3M__
857 lmw.bim $r6, [$sp], $r8, 2 ! V3M: restore the registers saved above
860 .size __udivdi3, .-__udivdi3
861 #endif /* L_udivdi3 */
/* fudiv_qrnnd: internal helper implementing GMP-style udiv_qrnnd —
   divide the two-word value n1:n0 by d using 16-bit half-word steps.
   In: $r4 = n1 (high), $r5 = n0 (low), $r6 = d.
   Out: $r7 = quotient, $r8 = remainder.
   Clobbers $r9, $r10, $ta; saves/restores $r0-$r4 and $lp.
   NOTE(review): sampled subset of the original lines.  */
870 .type fudiv_qrnnd, @function
871 #ifdef __big_endian__
891 !------------------------------------------------------
892 ! function: fudiv_qrnnd(quotient, remainder, high_numerator, low_numerator, denominator)
893 ! divides a UDWtype, composed by the UWtype integers, HIGH_NUMERATOR (from $r4)
894 ! and LOW_NUMERATOR(from $r5) by DENOMINATOR(from $r6), and places the quotient
895 ! in $r7 and the remainder in $r8.
896 !------------------------------------------------------
897 ! in reg:$r4(n1), $r5(n0), $r6(d0)
898 ! __d1 = ((USItype) (d) >> ((4 * 8) / 2));
899 ! __d0 = ((USItype) (d) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
900 ! __r1 = (n1) % __d1;
901 ! __q1 = (n1) / __d1;
902 ! __m = (USItype) __q1 * __d0;
903 ! __r1 = __r1 * ((USItype) 1 << ((4 * 8) / 2)) | ((USItype) (n0) >> ((4 * 8) / 2));
906 !------------------------------------------------------
907 smw.adm $r0, [$sp], $r4, 2 ! store $lp, when use BASELINE_V1,and must store $r0-$r3
908 srli $r7, $r6, 16 ! $r7 = d1 =__ll_highpart (d)
910 and $r8, $r6, $ta ! $r8 = d0 = __ll_lowpart (d)
912 divr $r9, $r10, $r4, $r7 ! $r9 = q1, $r10 = r1
913 and $r4, $r5, $ta ! $r4 = __ll_lowpart (n0)
914 slli $r10, $r10, 16 ! $r10 = r1 << 16
915 srli $ta, $r5, 16 ! $ta = __ll_highpart (n0)
917 or $r10, $r10, $ta ! $r10 <- r1<<16 | highpart(n0) = __r1
918 mul $r5, $r9, $r8 ! $r5 = m = __q1*__d0
919 slt $ta, $r10, $r5 ! $ta <- __r1<__m
920 beqz $ta, .L2 ! if __r1 >= __m, skip the q1 correction
921 !------------------------------------------------------
922 ! __q1--, __r1 += (d);
925 !------------------------------------------------------
927 add $r10, $r10, $r6 !$r10 <- __r1+d=__r1
928 addi $r9, $r9, -1 !$r9 <- __q1--=__q1
929 slt $ta, $r10, $r6 !$ta <- __r1<d
930 bnez $ta, .L2 !if yes,skip
931 !------------------------------------------------------
934 !------------------------------------------------------
936 slt $ta, $r10, $r5 !$ta <- __r1<__m
937 beqz $ta, .L2 ! if __r1 >= __m, skip the second correction
938 !------------------------------------------------------
939 ! __q1--, __r1 += (d);
943 !------------------------------------------------------
945 addi $r9, $r9, -1 !$r9 <- __q1--=__q1
946 add $r10, $r10, $r6 !$r10 <- __r1+d=__r1
948 !------------------------------------------------------
950 ! __r0 = __r1 % __d1;
951 ! __q0 = __r1 / __d1;
952 ! __m = (USItype) __q0 * __d0;
953 ! __r0 = __r0 * ((USItype) 1 << ((4 * 8) / 2)) \
954 ! | ((USItype) (n0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
957 !------------------------------------------------------
958 sub $r10, $r10, $r5 !$r10 <- __r1-__m=__r1
959 divr $r7, $r10, $r10, $r7 !$r7 <- r1/__d1=__q0,$r10 <- r1%__d1=__r0
960 slli $r10, $r10, 16 !$r10 <- __r0<<16
961 mul $r5, $r8, $r7 !$r5 <- __q0*__d0=__m
962 or $r10, $r4, $r10 !$r10 <- __r0<<16|__ll_lowpart (n0) =__r0
963 slt $ta, $r10, $r5 !$ta <- __r0<__m
964 beqz $ta, .L5 ! if __r0 >= __m, skip the q0 correction
965 !------------------------------------------------------
966 ! __q0--, __r0 += (d);
969 !------------------------------------------------------
971 add $r10, $r10, $r6 !$r10 <- __r0+d=__r0
972 addi $r7, $r7, -1 !$r7 <- __q0--=__q0
973 slt $ta, $r10, $r6 !$ta <- __r0<d
974 bnez $ta, .L5 !if yes,skip
975 !------------------------------------------------------
978 !------------------------------------------------------
980 slt $ta, $r10, $r5 !$ta <- __r0<__m
981 beqz $ta, .L5 ! if __r0 >= __m, skip the second correction
982 !------------------------------------------------------
983 ! __q0--, __r0 += (d);
987 !------------------------------------------------------
989 add $r10, $r10, $r6 !$r10 <- __r0+d=__r0
990 addi $r7, $r7, -1 !$r7 <- __q0--=__q0
992 !------------------------------------------------------
994 ! *q = (USItype) __q1 * ((USItype) 1 << ((4 * 8) / 2)) | __q0;
997 !------------------------------------------------------
999 sub $r8, $r10, $r5 !$r8 = r = r0 = __r0-__m
1000 slli $r9, $r9, 16 !$r9 <- __q1<<16
1001 or $r7, $r9, $r7 !$r7 = q = $r9|__q0
1002 lmw.bim $r0, [$sp], $r4, 2 ! restore $r0-$r4 and $lp
1004 .size fudiv_qrnnd, .-fudiv_qrnnd
/* __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp): 64-bit unsigned
   divide; returns the quotient and, when rp != NULL ($fp after the
   prologue), stores the remainder through rp.  The word halves are
   accessed via the P1L/P1H (numerator) and P2L/P2H (denominator)
   macros; q0/q1 and bm are kept in stack slots sp+32/sp+36/sp+28.
   Uses fudiv_qrnnd for the two-word-by-one-word core steps.
   NOTE(review): sampled subset of the original lines — several
   branch targets and the PERF_EXT clz sequences are not visible.  */
1008 .type __udivmoddi4, @function
1010 ! =====================================================================
1012 ! sp+40 +------------------+
1014 ! sp+36 +------------------+
1016 ! sp+32 +------------------+
1018 ! sp+28 +------------------+
1020 ! sp+24 +------------------+
1022 ! sp+20 +------------------+
1024 ! sp +------------------+
1025 ! =====================================================================
1028 smw.bi $r6, [$sp], $r10, 10 ! save $r6-$r10, $fp, $lp; carve out the frame
1029 !------------------------------------------------------
1036 !------------------------------------------------------
1038 move $fp, $r4 !$fp <- rp
1039 bnez P2H, .L9 ! if d1 != 0, take the two-word-divisor path
1040 !------------------------------------------------------
1043 !------------------------------------------------------
1045 slt $ta, P1H, P2L !$ta <- n1<d0
1046 beqz $ta, .L10 ! if n1 >= d0, skip
1047 #ifndef __NDS32_PERF_EXT__
1048 smw.adm $r0, [$sp], $r5, 0 ! save scratch regs around the software clz helper
1052 lmw.bim $r0, [$sp], $r5, 0
1057 beqz $r7, .L18 ! if bm == 0, skip normalization
1058 !------------------------------------------------------
1060 ! n1 = (n1 << bm) | (n0 >> ((4 * 8) - bm));
1063 !------------------------------------------------------
1065 subri $r5, $r7, 32 !$r5 <- 32-bm
1066 srl $r5, P1L, $r5 !$r5 <- n0>>$r5
1067 sll $r6, P1H, $r7 !$r6 <- n1<<bm
1068 or P1H, $r6, $r5 !P1H <- $r5|$r6=n1
1069 sll P1L, P1L, $r7 !P1L <- n0<<bm=n0
1070 sll P2L, P2L, $r7 !P2L <- d0<<bm=d0
1072 !------------------------------------------------------
1073 ! fudiv_qrnnd (&q0, &n0, n1, n0, d0);
1076 !------------------------------------------------------
1078 move $r4,P1H ! give fudiv_qrnnd args
1081 bal fudiv_qrnnd ! calculate q0 n0
1082 movi $r6, 0 !$r6 <- 0
1083 swi $r7,[$sp+32] !q0
1084 swi $r6,[$sp+36] !q1
1088 !------------------------------------------------------
1089 ! else #if (d0 > n1)
1092 !------------------------------------------------------
1094 bnez P2L, .L20 ! if d0 != 0, skip
1095 !------------------------------------------------------
1097 !------------------------------------------------------
1099 movi $r4, 1 !$r4 <- 1
1100 divr P2L, $r4, $r4, P2L ! divide-by-zero path: d0 = 1/d0 (traps/derives a usable divisor)
1103 #ifndef __NDS32_PERF_EXT__
1104 smw.adm $r0, [$sp], $r5, 0 ! save scratch regs around the software clz helper
1108 lmw.bim $r0, [$sp], $r5, 0
1112 swi $r7,[$sp+(28)] ! store bm
1113 beqz $r7, .L28 ! if bm == 0, skip normalization
1114 !------------------------------------------------------
1118 ! n1 = (n1 << bm) | (n0 >> b);
1120 ! fudiv_qrnnd (&q1, &n1, n2, n1, d0);
1122 !------------------------------------------------------
1124 subri $r10, $r7, 32 !$r10 <- 32-bm=b
1125 srl $r4, P1L, $r10 !$r4 <- n0>>b
1126 sll $r5, P1H, $r7 !$r5 <- n1<<bm
1127 or $r5, $r5, $r4 !$r5 <- $r5|$r4=n1 !for fun
1128 sll P2L, P2L, $r7 !P2L <- d0<<bm=d0 !for fun
1129 sll P1L, P1L, $r7 !P1L <- n0<<bm=n0
1130 srl $r4, P1H, $r10 !$r4 <- n1>>b=n2 !for fun
1132 move $r6,P2L !for fun
1133 bal fudiv_qrnnd ! calculate q1, n1
1135 swi $r7,[$sp+(36)] ! q1 store
1136 move P1H,$r8 ! n1 store
1138 move $r4,$r8 ! prepare for next fudiv_qrnnd()
1143 !------------------------------------------------------
1149 !------------------------------------------------------
1151 sub P1H, P1H, P2L !P1H <- n1-d0=n1
1153 swi $ta, [$sp+(36)] !1 -> [$sp+(36)]
1155 move $r4,P1H ! give fudiv_qrnnd args
1159 !------------------------------------------------------
1160 ! fudiv_qrnnd (&q0, &n0, n1, n0, d0);
1161 !------------------------------------------------------
1163 bal fudiv_qrnnd ! calculate q0, n0
1164 swi $r7,[$sp+(32)] !q0 store
1167 !------------------------------------------------------
1170 !------------------------------------------------------
1172 beqz $fp, .L31 ! if rp == NULL, no remainder wanted
1173 !------------------------------------------------------
1174 ! rr.s.low = n0 >> bm;
1178 !------------------------------------------------------
1180 movi $r5, 0 !$r5 <- 0
1181 lwi $r7,[$sp+(28)] !load bm
1182 srl $r4, P1L, $r7 !$r4 <- n0>>bm
1183 swi $r4, [$fp+OFFSET_L] !r0 !$r4 -> [$sp+(48)]
1184 swi $r5, [$fp+OFFSET_H] !r1 !0 -> [$sp+(52)]
1187 !------------------------------------------------------
1192 !------------------------------------------------------
1194 slt $ta, P1H, P2H !$ta <- n1<d1
1195 beqz $ta, .L32 ! if n1 >= d1, skip
1196 !------------------------------------------------------
1201 !------------------------------------------------------
1203 movi $r5, 0 !$r5 <- 0
1204 swi $r5, [$sp+(32)] !q0 !0 -> [$sp+(32)]=q0
1205 swi $r5, [$sp+(36)] !q1 !0 -> [$sp+(36)]=q1
1206 beqz $fp, .L31 ! if rp == NULL, no remainder wanted
1207 !------------------------------------------------------
1212 !------------------------------------------------------
1214 swi P1L, [$fp+OFFSET_L] !P1L -> [rp]
1215 swi P1H, [$fp+OFFSET_H] !P1H -> [rp+4]
1218 #ifndef __NDS32_PERF_EXT__
1219 smw.adm $r0, [$sp], $r5, 0 ! save scratch regs around the software clz helper
1223 lmw.bim $r0, [$sp], $r5, 0
1227 swi $r7,[$sp+(28)] !$r7=bm store
1228 beqz $r7, .L42 ! if bm == 0, skip normalization
1229 !------------------------------------------------------
1232 ! d1 = (d0 >> b) | (d1 << bm);
1235 ! n1 = (n0 >> b) | (n1 << bm);
1237 ! fudiv_qrnnd (&q0, &n1, n2, n1, d1);
1238 !------------------------------------------------------
1240 subri $r10, $r7, 32 !$r10 <- 32-bm=b
1241 srl $r5, P2L, $r10 !$r5 <- d0>>b
1242 sll $r6, P2H, $r7 !$r6 <- d1<<bm
1243 or $r6, $r5, $r6 !$r6 <- $r5|$r6=d1 !! func
1244 move P2H, $r6 !P2H <- d1
1245 srl $r4, P1H, $r10 !$r4 <- n1>>b=n2 !!! func
1246 srl $r8, P1L, $r10 !$r8 <- n0>>b !!$r8
1247 sll $r9, P1H, $r7 !$r9 <- n1<<bm
1248 or $r5, $r8, $r9 !$r5 <- $r8|$r9=n1 !func
1249 sll P2L, P2L, $r7 !P2L <- d0<<bm=d0
1250 sll P1L, P1L, $r7 !P1L <- n0<<bm=n0
1252 bal fudiv_qrnnd ! cal q0,n1
1254 move P1H,$r8 ! fudiv_qrnnd (&q0, &n1, n2, n1, d1);
1255 move $r6, $r7 ! from func
1257 !----------------------------------------------------
1258 ! #umul_ppmm (m1, m0, q0, d0);
1260 ! { USItype __x0, __x1, __x2, __x3;
1261 ! USItype __ul, __vl, __uh, __vh;
1262 ! __ul = ((USItype) (q0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
1263 ! __uh = ((USItype) (q0) >> ((4 * 8) / 2));
1264 ! __vl = ((USItype) (d0) & (((USItype) 1 << ((4 * 8) / 2)) - 1));
1265 ! __vh = ((USItype) (d0) >> ((4 * 8) / 2));
1266 ! __x0 = (USItype) __ul * __vl;
1267 ! __x1 = (USItype) __ul * __vh;
1268 ! __x2 = (USItype) __uh * __vl;
1269 ! __x3 = (USItype) __uh * __vh;
1270 ! __x1 += ((USItype) (__x0) >> ((4 * 8) / 2));
1273 ! __x3 += ((USItype) 1 << ((4 * 8) / 2));
1274 ! (m1) = __x3 + ((USItype) (__x1) >> ((4 * 8) / 2));
1275 ! (m0) = (USItype)(q0*d0);
1278 !---------------------------------------------------
1279 #ifdef __NDS32_ISA_V3M__
1280 !mulr64 $r4, P2L, $r6
1281 smw.adm $r0, [$sp], $r3, 0 ! V3M has no mulr64; save scratch around the software 64-bit multiply
1288 lmw.bim $r0, [$sp], $r3, 0
1292 mulr64 $r4, P2L, $r6 ! m1:m0 = d0 * q0 in one instruction
1296 slt $ta, P1H, $r8 !$ta <- n1<m1
1297 bnez $ta, .L46 !if yes,skip
1298 !------------------------------------------------------
1300 !------------------------------------------------------
1302 bne $r8, P1H, .L45 ! if m1 != n1, skip
1303 !------------------------------------------------------
1305 !------------------------------------------------------
1307 slt $ta, P1L, $r5 !$ta <- n0<m0
1308 beqz $ta, .L45 ! if n0 >= m0, skip
1310 !------------------------------------------------------
1313 ! # sub_ddmmss (m1, m0, m1, m0, d1, d0);
1316 ! __x = (m0) - (d0);
1317 ! (m1) = (m1) - (d1) - (__x > (m0));
1321 !------------------------------------------------------
1323 sub $r4, $r5, P2L !$r4 <- m0-d0=__x
1324 addi $r6, $r6, -1 !$r6 <- q0--=q0
1325 sub $r8, $r8, P2H !$r8 <- m1-d1
1326 swi $r6, [$sp+(32)] ! q0 !$r6->[$sp+(32)]
1327 slt $ta, $r5, $r4 !$ta <- m0<__x
1328 sub $r8, $r8, $ta !$r8 <- borrow-adjusted m1
1329 move $r5, $r4 !$r5 <- __x=m0
1331 !------------------------------------------------------
1335 !------------------------------------------------------
1337 movi $r4, 0 !$r4 <- 0
1338 swi $r4, [$sp+(36)] !0 -> [$sp+(36)]=q1
1339 beqz $fp, .L31 ! if rp == NULL, no remainder wanted
1340 !------------------------------------------------------
1341 ! # sub_ddmmss (n1, n0, n1, n0, m1, m0);
1344 ! __x = (n0) - (m0);
1345 ! (n1) = (n1) - (m1) - (__x > (n0));
1348 ! rr.s.low = (n1 << b) | (n0 >> bm);
1349 ! rr.s.high = n1 >> bm;
1351 !------------------------------------------------------
1353 sub $r4, P1H, $r8 !$r4 <- n1-m1
1354 sub $r6, P1L, $r5 !$r6 <- n0-m0=__x=n0
1355 slt $ta, P1L, $r6 !$ta <- n0<__x
1356 sub P1H, $r4, $ta !P1H <- $r4-$ta=n1
1359 lwi $r7,[$sp+(28)] ! load bm
1361 sll $r4, P1H, $r10 !$r4 <- n1<<b
1362 srl $r5, P1L, $r7 !$r5 <- __x>>bm
1363 or $r6, $r5, $r4 !$r6 <- $r5|$r4=rr.s.low
1364 srl $r8, P1H, $r7 !$r8 <- n1>>bm =rr.s.high
1365 swi $r6, [$fp+OFFSET_L] ! store remainder low word
1366 swi $r8, [$fp+OFFSET_H] ! store remainder high word
1369 !------------------------------------------------------
1373 !------------------------------------------------------
1375 slt $ta, P2H, P1H !$ta <- d1<n1
1376 bnez $ta, .L52 !if yes,skip
1377 !------------------------------------------------------
1379 !------------------------------------------------------
1381 slt $ta, P1L, P2L !$ta <- n0<d0
1382 bnez $ta, .L51 !if yes,skip
1383 !------------------------------------------------------
1387 ! __x = (n0) - (d0);
1388 ! (n1) = (n1) - (d1) - (__x > (n0));
1391 !------------------------------------------------------
1393 sub $r4, P1H, P2H !$r4 <- n1-d1
1394 sub $r6, P1L, P2L !$r6 <- n0-d0=__x=n0
1395 slt $ta, P1L, $r6 !$ta <- n0<__x
1396 sub P1H, $r4, $ta !P1H <- $r4-$ta=n1
1399 swi $r5, [$sp+(32)] !1 -> [$sp+(32)]=q0
1402 !------------------------------------------------------
1404 !------------------------------------------------------
1407 swi $r5, [$sp+(32)] !$r5=0 -> [$sp+(32)]
1409 !------------------------------------------------------
1413 !------------------------------------------------------
1416 swi $r5, [$sp+(36)] !0 -> [$sp+(36)]
1418 !------------------------------------------------------
1423 !------------------------------------------------------
1425 swi P1L, [$fp+OFFSET_L] !remainder
1426 swi P1H, [$fp+OFFSET_H] ! remainder high word
1428 !------------------------------------------------------
1429 ! const DWunion ww = {{.low = q0, .high = q1}};
1432 !------------------------------------------------------
1434 lwi P1L, [$sp+(32)] !quotient
1436 lmw.bim $r6, [$sp], $r10, 10 ! restore $r6-$r10, $fp, $lp and release the frame
1439 .size __udivmoddi4, .-__udivmoddi4
1440 #endif /* L_udivmoddi4 */
1446 ! =====================================================================
/* __umodsi3: 32-bit unsigned remainder, $r0 = $r0 % $r1, shift-subtract
   loop (udivmodsi4 with modwanted = 1).  NOTE(review): sampled subset
   of the original lines.  */
1450 .type __umodsi3, @function
1452 ! ---------------------------------------------------------------------
1453 !!res=udivmodsi4(a,b,1);
1456 ! ---------------------------------------------------------------------
1457 beqz $r1, .L1 ! if den==0, skip
1458 ! ---------------------------------------------------------------------
1461 ! ---------------------------------------------------------------------
1462 movi $r4, 1 ! $r4 <- bit=1
1463 #ifndef __OPTIMIZE_SIZE__
1466 ! ---------------------------------------------------------------------
1468 ! ---------------------------------------------------------------------
1469 slt $ta, $r1, $r0 ! $ta <- den<num?
1470 beqz $ta, .L5 ! if no, skip
1471 ! ---------------------------------------------------------------------
1472 ! &&bit&&!(den&(1L<<31)))
1473 ! ---------------------------------------------------------------------
1474 bltz $r1, .L5 ! if den<0, skip
1475 ! ---------------------------------------------------------------------
1479 ! ---------------------------------------------------------------------
1480 #if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
1481 clz $r3, $r1 ! $r3 <- leading zero count for den
1482 clz $ta, $r0 ! $ta <- leading zero count for num
1483 sub $r3, $r3, $ta ! $r3 <- number of bits to shift
1484 sll $r1, $r1, $r3 ! $r1 <- den
1485 sll $r4, $r4, $r3 ! $r4 <- bit
1487 slli $r1, $r1, 1 ! $r1 <- den<<=1
1488 slli $r4, $r4, 1 ! $r4 <- bit<<=1
1489 b .L6 ! continue loop
1492 ! ---------------------------------------------------------------------
1504 ! ---------------------------------------------------------------------
1505 sub $r2, $r0, $r1 ! $r2 <- num-den
1506 slt $ta, $r0, $r1 ! $ta <- num<den?
1507 srli $r4, $r4, 1 ! $r4 <- bit>>=1
1508 cmovz $r0, $r2, $ta ! $r0 <- num=(num<den)?num:num-den
1509 srli $r1, $r1, 1 ! $r1 <- den>>=1
1510 bnez $r4, .L5 ! if bit!=0, continue loop
1512 ! ---------------------------------------------------------------------
1514 ! ---------------------------------------------------------------------
1516 .size __umodsi3, .-__umodsi3
1517 #endif /* L_umodsi3 */
1523 !--------------------------------------
1524 #ifdef __big_endian__
1535 !--------------------------------------
/* __umoddi3: 64-bit unsigned remainder; calls __udivmoddi4 with a
   stack slot for the remainder and loads the result words back.
   NOTE(review): sampled subset — the call sequence is not visible.  */
1539 .type __umoddi3, @function
1547 lwi $r0, [$sp+(4)] ! __udivmoddi4 return low when LE mode or return high when BE mode
1548 lwi $r1, [$sp+(8)] ! the other half of the remainder
1554 .size __umoddi3, .-__umoddi3
1555 #endif /* L_umoddi3 */
1561 #ifdef __big_endian__
1579 ! ====================================================================
/* __muldi3: 64-bit multiply (guarded by L_muldi3).  On V3M the
   64-bit product is built from 32x32 partial products via maddr32;
   otherwise a single mulr64 computes the low 64 bits directly.
   NOTE(review): sampled subset of the original lines.  */
1583 .type __muldi3, @function
1585 ! parameter passing for libgcc functions normally involves 2 doubles
1586 !---------------------------------------
1587 #ifdef __NDS32_ISA_V3M__
1588 ! There is no mulr64 instruction in Andes ISA V3M.
1589 ! So we must provide a sequence of calculations to complete the job.
1590 smw.adm $r6, [$sp], $r9, 0x0 ! save $r6-$r9
1598 maddr32 $r9, $r8, $r4 ! accumulate a 32x32 partial product
1603 maddr32 $r5, $r8, $r7
1606 maddr32 P2L, P2H, P1L
1607 add333 P1H, $r5, $r7
1610 add333 P1L, $r4, $r6
1611 add333 P1H, P2L, P1H
1612 lmw.bim $r6, [$sp], $r9, 0x0 ! restore $r6-$r9
1614 #else /* not __NDS32_ISA_V3M__ */
1616 mulr64 $r4, P1L, P2L ! 64-bit product of the low words
1617 maddr32 $ta, P1H, P2L ! add cross products into the high word
1621 #endif /* not __NDS32_ISA_V3M__ */
1622 .size __muldi3, .-__muldi3
1623 #endif /* L_muldi3 */
1629 #ifndef __big_endian__
/* __subdf3/__adddf3: software double-precision FP subtract/add
   (guarded by L_addsub_df).  __subdf3 flips the sign of the second
   operand and falls into __adddf3.  Operand word halves are accessed
   via the P1L/P1H, P2L/P2H, O1L macros; #NORMd marks the expansion of
   a normalization macro.  NOTE(review): this chunk shows only a
   sampled subset of the routine's lines.  */
1651 .type __subdf3, @function
1656 move $r4, #0x80000000 ! sign-bit mask for the high word
1662 .type __adddf3, @function
1673 move O1L, #0x80000000
1684 slti $r15, $r10, #0x7fe ! exponent below 0x7fe (not Inf/NaN/max) ?
1689 slti $r15, $r10, #0x7fe
1693 #NORMd($r4, P2L, P1L)
1704 #ifndef __big_endian__
1705 #ifdef __NDS32_PERF_EXT__
1716 #else /* __big_endian__ */
1717 #ifdef __NDS32_PERF_EXT__
1728 #endif /* __big_endian__ */
1739 #NORMd($r7, P2L, P1L)
1750 #ifndef __big_endian__
1751 #ifdef __NDS32_PERF_EXT__
1762 #else /* __big_endian__ */
1763 #ifdef __NDS32_PERF_EXT__
1774 #endif /* __big_endian__ */
1785 move $r10, #0x80000000
1788 beq $r6, $r9, .LEadd3
1800 addi $r9, $r9, #0xffffffe0 ! $r9 -= 32
1808 subri $r9, $r9, #0x20 ! $r9 <- 32 - $r9
1828 addi $r9, $r9, #0xffffffe0 ! $r9 -= 32
1836 subri $r9, $r9, #0x20 ! $r9 <- 32 - $r9
1846 sltsi $r15, $r10, #0
1879 move $r10, #0x80000000
1882 subri $r15, $r6, #0x7ff ! exponent == 0x7ff (DFmode Inf/NaN) ?
1884 move $r10, #0x7ff00000 ! high word of +Inf in DFmode
1901 subi333 P3H, P3H, #1
1905 subi333 P3H, P3H, #1
1915 move $r10, #0x80000000
1925 #NORMd($r4, $r9, P1L)
1936 #ifdef __NDS32_PERF_EXT__
1983 srli $r10, $r10, #12
2008 subri $r15, $r9, #0x7ff
2023 subri $r15, $r9, #0x7ff
2026 sltsi $r15, $r10, #0
2048 move P1H, #0xfff80000 ! high word of the default quiet NaN in DFmode
2062 addi $r9, $r9, #0xffffffe0 ! $r9 -= 32
2070 subri $r9, $r9, #0x20 ! $r9 <- 32 - $r9
2080 .size __subdf3, .-__subdf3
2081 .size __adddf3, .-__adddf3
2082 #endif /* L_addsub_df */
2088 #if !defined (__big_endian__)
! __mulsf3(a, b): IEEE single-precision multiplication (soft-float).
2102 .type __mulsf3, @function
! Extract the 8-bit biased exponents of both operands.
2108 andi $r3, $r3, #0xff
2110 andi $r5, $r5, #0xff
2111 move $r6, #0x80000000
! 0xfe is the largest normal exponent field before Inf/NaN (0xff).
2120 slti $r15, $r8, #0xfe
2125 slti $r15, $r8, #0xfe
2130 /* This is a 64-bit multiple. ($r2, $r7) is (high, low). */
2131 #ifndef __NDS32_ISA_V3M__
2132 mulr64 $r2, $r2, $r4
2145 #ifndef __big_endian__
! Rebias: 0xffffff82 == -126, adjusting the product's exponent.
2162 addi $r8, $r5, #0xffffff82
2165 slti $r15, $r8, #0xfe
2194 #ifdef __NDS32_PERF_EXT__
! Check operands against exponent 0xff (Inf/NaN encoding).
2207 subri $r15, $r5, #0xff
2217 subri $r15, $r5, #0xff
2224 #ifdef __NDS32_PERF_EXT__
! Infinity result pattern (exponent all ones, zero mantissa).
2245 move $r8, #0x7f800000
! Default quiet NaN for invalid operations (e.g. 0 * Inf).
2250 move $r0, #0xffc00000
! Denormal result path: shift amounts split across the 32-bit boundary.
2256 slti $r15, $r7, #0x20
2258 subri $r8, $r7, #0x20
! Round to nearest: add half-ulp (0x80) of the discarded low byte.
2265 addi $r8, $r2, #0x80
2270 .size __mulsf3, .-__mulsf3
2271 #endif /* L_mul_sf */
2277 #ifndef __big_endian__
! __muldf3(a, b): IEEE double-precision multiplication (soft-float).
! The 53x53-bit mantissa product is built from four 32x32 partial
! products; each one is guarded by ISA/endianness conditionals below.
2299 .type __muldf3, @function
2310 move O1L, #0x80000000
! 0x7fe is the largest normal exponent field before Inf/NaN (0x7ff).
2323 slti $r15, $r10, #0x7fe
2328 slti $r15, $r10, #0x7fe
! Rebias: 0xfffffc02 == -1022.
2332 addi $r10, $r9, #0xfffffc02
2336 /* This is a 64-bit multiple. */
2337 #ifndef __big_endian__
2338 /* For little endian: ($r9, $r3) is (high, low). */
2339 #ifndef __NDS32_ISA_V3M__
2340 mulr64 $r8, $r5, $r8
2352 #else /* __big_endian__ */
2353 /* For big endian: ($r9, $r2) is (high, low). */
2354 #ifndef __NDS32_ISA_V3M__
2355 mulr64 $r8, $r4, $r7
2368 #endif /* __big_endian__ */
2372 /* This is a 64-bit multiple. */
2373 #ifndef __big_endian__
2374 /* For little endian: ($r0, $r2) is (high, low). */
2375 #ifndef __NDS32_ISA_V3M__
2376 mulr64 $r0, $r4, $r8
2388 #else /* __big_endian__ */
2389 /* For big endian: ($r1, $r3) is (high, low). */
2390 #ifndef __NDS32_ISA_V3M__
2391 mulr64 $r0, $r5, $r7
2403 #endif /* __big_endian__ */
2414 /* This is a 64-bit multiple. */
2415 #ifndef __big_endian__
2416 /* For little endian: ($r0, $r8) is (high, low). */
2417 #ifndef __NDS32_ISA_V3M__
2418 mulr64 $r0, $r5, $r7
2430 #else /* __big_endian__ */
2431 /* For big endian: ($r1, $r7) is (high, low). */
2432 #ifndef __NDS32_ISA_V3M__
2433 mulr64 $r0, $r4, $r8
2445 #endif /* __big_endian__ */
2473 /* This is a 64-bit multiple. */
2474 #ifndef __big_endian__
2475 /* For little endian: ($r8, $r0) is (high, low). */
2477 #ifndef __NDS32_ISA_V3M__
2478 mulr64 $r8, $r4, $r7
2492 #else /* __big_endian__ */
2493 /* For big endian: ($r7, $r1) is (high, low). */
2495 #ifndef __NDS32_ISA_V3M__
2496 mulr64 $r6, $r5, $r8
2510 #endif /* __big_endian__ */
2542 slti $r15, $r10, #0x7fe
2568 srli $r10, $r10, #12
! Normalize a denormal operand/result; NORMd is an inline macro.
2593 #NORMd($r4, P1L, P2H)
2604 #ifndef __big_endian__
2605 #ifdef __NDS32_PERF_EXT__
2613 #else /* __big_endian__ */
2614 #ifdef __NDS32_PERF_EXT__
2625 #endif /* __big_endian__ */
! Inf/NaN operand checks against exponent 0x7ff.
2638 subri $r15, $r9, #0x7ff
2649 subri $r15, $r9, #0x7ff
2666 #NORMd($r7, P2L, P1L)
2677 #ifndef __big_endian__
2678 #ifdef __NDS32_PERF_EXT__
2689 #else /* __big_endian__ */
2690 #ifdef __NDS32_PERF_EXT__
2701 #endif /* __big_endian__ */
! Infinity high word; NaN high word (default quiet NaN) just below.
2721 move $r10, #0x7ff00000
2727 move P1H, #0xfff80000
! Denormal-result shifts split across the 32-bit word boundary.
2742 addi P1L, P1L, #0xffffffe0
2750 subri P1L, P1L, #0x20
2767 .size __muldf3, .-__muldf3
2768 #endif /* L_mul_df */
! __divsf3(a, b): IEEE single-precision division (soft-float), using
! the hardware 32-bit divider (divr) for the mantissa quotient.
2777 .type __divsf3, @function
2782 move $r7, #0x80000000
! Extract the 8-bit biased exponents of both operands.
2784 andi $r4, $r4, #0xff
2786 andi $r6, $r6, #0xff
! 0xfe is the largest normal exponent field before Inf/NaN (0xff).
2795 slti $r15, $r10, #0xfe
2800 slti $r15, $r10, #0xfe
! divr rd, rr, ra, rb: quotient in rd, remainder in rr.
2810 divr $r0, $r2, $r3, $r8
2811 andi $r9, $r5, #0x3fff
2826 divr $r3, $r2, $r2, $r8
! Result exponent: 0x7e is the single-precision bias minus one.
2847 subri $r10, $r6, #0x7e
2850 slti $r15, $r10, #0xfe
2879 #ifdef __NDS32_PERF_EXT__
! Inf/NaN operand check against exponent 0xff.
2896 subri $r15, $r6, #0xff
2907 #ifdef __NDS32_PERF_EXT__
! Denormal-result shifts split across the 32-bit word boundary.
2930 slti $r15, $r8, #0x20
2932 subri $r10, $r8, #0x20
! Round to nearest: add half-ulp (0x80) and test for mantissa overflow.
2939 addi $r10, $r3, #0x80
2940 sltsi $r15, $r10, #0
! Infinity pattern; default quiet NaN (0xffc00000) for invalid cases.
2946 move $r10, #0x7f800000
2951 move $r0, #0xffc00000
2953 .size __divsf3, .-__divsf3
2954 #endif /* L_div_sf */
2960 #ifndef __big_endian__
! __divdf3(a, b): IEEE double-precision division (soft-float).
! The 53-bit quotient is produced piecewise with 32-bit divr steps,
! then checked by multiplying back (the mulr64 blocks below).
2982 .type __divdf3, @function
2993 move O1L, #0x80000000
! 0x7fe is the largest normal exponent field before Inf/NaN (0x7ff).
3006 slti $r15, $r10, #0x7fe
3011 slti $r15, $r10, #0x7fe
! Rebias the result exponent with the double-precision bias 0x3ff.
3016 addi $r6, $r6, #0x3ff
! divr rd, rr, ra, rb: quotient in rd, remainder in rr.
3022 divr P2H, P3H, P3H, $r9
3042 divr $r9, P3H, P3H, $r9
3064 /* This is a 64-bit multiple. */
3065 #ifndef __big_endian__
3066 /* For little endian: ($r0, $r9) is (high, low). */
3068 #ifndef __NDS32_ISA_V3M__
3069 mulr64 $r0, $r3, $r7
3082 #else /* __big_endian__ */
3083 /* For big endian: ($r1, $r9) is (high, low). */
3085 #ifndef __NDS32_ISA_V3M__
3086 mulr64 $r0, $r2, $r8
3099 #endif /* __big_endian__ */
! Correction step: back off the quotient digit while the product
! exceeds the remainder.
3115 subi333 P3H, P3H, #1
3119 subi333 P3H, P3H, #1
3165 divr $r9, P3H, P3H, P2L
3185 divr P2L, P3H, P3H, P2L
3207 /* This is a 64-bit multiple. */
3208 #ifndef __big_endian__
3209 /* For little endian: ($r0, $r2) is (high, low). */
3211 #ifndef __NDS32_ISA_V3M__
3212 mulr64 $r0, $r9, $r7
3225 #else /* __big_endian__ */
3226 /* For big endian: ($r1, $r3) is (high, low). */
3228 #ifndef __NDS32_ISA_V3M__
3229 mulr64 $r0, $r9, $r8
3242 #endif /* __big_endian__ */
3259 subi333 P3H, P3H, #1
3263 subi333 P3H, P3H, #1
3321 slti $r15, $r10, #0x7fe
3347 srli $r10, $r10, #12
! Denormal-result shifts split across the 32-bit word boundary.
3369 addi P2H, P2H, #0xffffffe0
3377 subri P2H, P2H, #0x20
! Normalize a denormal operand; NORMd is an inline macro.
3409 #NORMd($r4, P2H, P2L)
3420 #ifndef __big_endian__
3421 #ifdef __NDS32_PERF_EXT__
3432 #else /* __big_endian__ */
3433 #ifdef __NDS32_PERF_EXT__
3444 #endif /* __big_endian__ */
! Inf/NaN operand check; 0x7ff00000 is the infinity high word.
3462 subri $r15, $r9, #0x7ff
3469 move $r10, #0x7ff00000
3488 #NORMd($r7, P2H, P2L)
3499 #ifndef __big_endian__
3500 #ifdef __NDS32_PERF_EXT__
3511 #else /* __big_endian__ */
3512 #ifdef __NDS32_PERF_EXT__
3523 #endif /* __big_endian__ */
! Default quiet NaN for invalid operations (0/0, Inf/Inf).
3540 move P1H, #0xfff80000
3544 .size __divdf3, .-__divdf3
3545 #endif /* L_div_df */
! __negsf2(a): single-precision negation — XOR the IEEE sign bit.
! Works for all inputs including zeros, Inf and NaN (bit flip only).
3554 .type __negsf2, @function
3558 move $r1, #0x80000000
3564 .size __negsf2, .-__negsf2
3565 #endif /* L_negate_sf */
3571 #ifndef __big_endian__
! __negdf2(a): double-precision negation — XOR the sign bit of the
! high word; the low mantissa word passes through unchanged.
3579 .type __negdf2, @function
3583 move $r2, #0x80000000
3589 .size __negdf2, .-__negdf2
3590 #endif /* L_negate_df */
3596 #ifndef __big_endian__
! __extendsfdf2(a): widen IEEE single to double (always exact).
3605 .global __extendsfdf2
3606 .type __extendsfdf2, @function
! Extract the 8-bit single-precision exponent.
3611 andi $r3, $r3, #0xff
3612 move $r5, #0x80000000
3615 slti $r15, $r5, #0xfe
! Rebias the exponent: 0x380 == double bias 1023 - single bias 127.
3619 addi $r3, $r3, #0x380
3650 move $r5, #0xff000000
! Default quiet NaN high word for NaN inputs.
3653 move O1H, #0xfff80000
3655 .size __extendsfdf2, .-__extendsfdf2
3656 #endif /* L_sf_to_df */
3662 #ifndef __big_endian__
! __truncdfsf2(a): narrow IEEE double to single, with rounding and
! overflow/underflow to Inf/denormal as appropriate.
3675 .global __truncdfsf2
3676 .type __truncdfsf2, @function
3685 move $r7, #0x80000000
! Rebias the exponent: 0xfffffc80 == -(1023 - 127) = -896.
3690 addi $r4, $r4, #0xfffffc80
3692 slti $r15, $r7, #0xfe
! 0x47f is the double biased exponent marking Inf/NaN after rebias checks.
3721 subri $r15, $r4, #0x47f
! Default quiet NaN for NaN inputs.
3726 move $r0, #0xffc00000
! Overflow check against the single-precision exponent limit 0xff.
3729 sltsi $r15, $r4, #0xff
3731 move $r7, #0x7f800000
! Denormal-result shift split across the 32-bit word boundary.
3742 subri $r8, $r6, #0x20
3747 move $r7, #0x80000000
3750 .size __truncdfsf2, .-__truncdfsf2
3751 #endif /* L_df_to_sf */
3757 #ifndef __big_endian__
! __fixdfsi(a): convert double to signed 32-bit int (truncation).
! Out-of-range/NaN inputs saturate to INT_MIN/INT_MAX below.
3765 .type __fixdfsi, @function
3773 move $r6, #0x80000000
! 0x41e is the biased exponent of 2^31: shift amount = 0x41e - exp.
3777 subri $r2, $r6, #0x41e
3800 move $r6, #0x7ff00000
! Saturated results: 0x80000000 (negative overflow), 0x7fffffff (positive).
3803 move $r0, #0x80000000
3806 move $r0, #0x7fffffff
3808 .size __fixdfsi, .-__fixdfsi
3809 #endif /* L_df_to_si */
3815 #ifndef __big_endian__
! __fixsfdi(a): convert single to signed 64-bit int (truncation),
! saturating on overflow/NaN.
3825 .type __fixsfdi, @function
3830 andi $r3, $r3, #0xff
3832 move $r5, #0x80000000
! 0xbe is the biased single exponent of 2^63; larger means overflow.
3835 sltsi $r15, $r3, #0xbe
3837 subri $r3, $r3, #0xbe
! 64-bit right shift split across the 32-bit word boundary.
3844 addi $r3, $r3, #0xffffffe0
3851 subri $r3, $r3, #0x20
! NaN check against exponent 0xff.
3875 subri $r15, $r3, #0xff
! Saturated results: LLONG_MIN high word / LLONG_MAX high word.
3881 move O1H, #0x80000000
3884 move O1H, #0x7fffffff
3887 .size __fixsfdi, .-__fixsfdi
3888 #endif /* L_fixsfdi */
3894 #ifndef __big_endian__
! __fixdfdi(a): convert double to signed 64-bit int (truncation),
! saturating on overflow/NaN.
3908 .type __fixdfdi, @function
3919 move $r6, #0x80000000
! 0x43e is the biased double exponent of 2^63; larger means overflow.
3921 slti $r15, $r5, #0x43e
3923 subri $r2, $r5, #0x43e
! 64-bit right shift split across the 32-bit word boundary.
3930 addi $r2, $r2, #0xffffffe0
3937 subri $r2, $r2, #0x20
! NaN check against exponent 0x7ff.
3962 subri $r15, $r5, #0x7ff
! Saturated results: LLONG_MIN high word / LLONG_MAX high word.
3969 move O1H, #0x80000000
3973 move O1H, #0x7fffffff
3976 .size __fixdfdi, .-__fixdfdi
3977 #endif /* L_fixdfdi */
! __fixunssfsi(a): convert single to unsigned 32-bit int (truncation).
3983 .global __fixunssfsi
3984 .type __fixunssfsi, @function
3989 move $r3, #0x80000000
3992 andi $r3, $r3, #0xff
! 0x9e is the biased single exponent of 2^31 + bias offset used as the
! shift base: shift = 0x9e - exp.
3993 subri $r2, $r3, #0x9e
3996 sltsi $r15, $r2, #0x20
! Infinity/NaN exponent pattern for the overflow check.
4013 move $r3, #0x7f800000
4016 move $r0, #0x80000000
4021 .size __fixunssfsi, .-__fixunssfsi
4022 #endif /* L_fixunssfsi */
4028 #ifndef __big_endian__
! __fixunsdfsi(a): convert double to unsigned 32-bit int (truncation).
4037 .global __fixunsdfsi
4038 .type __fixunsdfsi, @function
4046 move $r6, #0x80000000
! 0x41e is the biased double exponent of 2^31: shift = 0x41e - exp.
4050 subri $r2, $r6, #0x41e
! Infinity/NaN high-word pattern for the overflow check.
4074 move $r6, #0x7ff00000
4077 move $r0, #0x80000000
4082 .size __fixunsdfsi, .-__fixunsdfsi
4083 #endif /* L_fixunsdfsi */
4089 #ifndef __big_endian__
! __fixunssfdi(a): convert single to unsigned 64-bit int (truncation).
4098 .global __fixunssfdi
4099 .type __fixunssfdi, @function
4104 andi $r3, $r3, #0xff
4106 move $r5, #0x80000000
! 0xbe is the biased single exponent of 2^63; larger means overflow.
4109 sltsi $r15, $r3, #0xbe
4111 subri $r3, $r3, #0xbe
! 64-bit right shift split across the 32-bit word boundary.
4118 addi $r3, $r3, #0xffffffe0
4125 subri $r3, $r3, #0x20
4147 move O1H, #0x80000000
4150 .size __fixunssfdi, .-__fixunssfdi
4151 #endif /* L_fixunssfdi */
4157 #ifndef __big_endian__
! __fixunsdfdi(a): convert double to unsigned 64-bit int (truncation).
4170 .global __fixunsdfdi
4171 .type __fixunsdfdi, @function
4182 move $r6, #0x80000000
! 0x43e is the biased double exponent of 2^63; larger means overflow.
4184 slti $r15, $r5, #0x43e
4186 subri $r2, $r5, #0x43e
! 64-bit right shift split across the 32-bit word boundary.
4193 addi $r2, $r2, #0xffffffe0
4200 subri $r2, $r2, #0x20
4223 move O1H, #0x80000000
4226 .size __fixunsdfdi, .-__fixunsdfdi
4227 #endif /* L_fixunsdfdi */
! __floatsisf(i): convert signed 32-bit int to single precision.
! Uses the clz perf-extension instruction when available to find the
! leading-one position for normalization.
4236 .type __floatsisf, @function
4240 move $r4, #0x80000000
4248 #ifdef __NDS32_PERF_EXT__
4281 .size __floatsisf, .-__floatsisf
4282 #endif /* L_si_to_sf */
4288 #ifndef __big_endian__
! __floatsidf(i): convert signed 32-bit int to double precision
! (always exact — a 32-bit int fits in the 53-bit mantissa).
4302 .type __floatsidf, @function
4314 move O2H, #0x80000000
! clz-based normalization, with a fallback when the perf extension
! is not available (fallback body elided from this view).
4323 #ifndef __big_endian__
4324 #ifdef __NDS32_PERF_EXT__
4335 #else /* __big_endian__ */
4336 #ifdef __NDS32_PERF_EXT__
4345 #endif /* __big_endian__ */
4364 .size __floatsidf, .-__floatsidf
4365 #endif /* L_si_to_df */
4371 #ifndef __big_endian__
! __floatdisf(i): convert signed 64-bit int to single precision
! (rounds — a 64-bit int may not fit in the 24-bit mantissa).
4385 .type __floatdisf, @function
4390 move $r7, #0x80000000
! Normalize the magnitude; NORMd is an inline macro.
4408 #NORMd($r2, $r6, P1L)
4419 #ifdef __NDS32_PERF_EXT__
4462 .size __floatdisf, .-__floatdisf
4463 #endif /* L_floatdisf */
4469 #ifndef __big_endian__
! __floatdidf(i): convert signed 64-bit int to double precision.
4487 .type __floatdidf, @function
4501 move $r7, #0x80000000
! Normalize the magnitude; NORMd is an inline macro.
4510 #NORMd($r2, O1H, O1L)
4521 #ifdef __NDS32_PERF_EXT__
4523 #else /* not __NDS32_PERF_EXT__ */
4525 Replace clz with function call.
4530 #ifndef __big_endian__
4543 #endif /* not __NDS32_PERF_EXT__ */
4587 .size __floatdidf, .-__floatdidf
4588 #endif /* L_floatdidf */
4592 #ifdef L_floatunsisf
! __floatunsisf(u): convert unsigned 32-bit int to single precision.
4596 .global __floatunsisf
4597 .type __floatunsisf, @function
4603 #ifdef __NDS32_PERF_EXT__
4636 .size __floatunsisf, .-__floatunsisf
4637 #endif /* L_floatunsisf */
4641 #ifdef L_floatunsidf
4643 #ifndef __big_endian__
! __floatunsidf(u): convert unsigned 32-bit int to double precision
! (always exact).
4656 .global __floatunsidf
4657 .type __floatunsidf, @function
! clz-based normalization with non-perf-extension fallback.
4667 #ifndef __big_endian__
4668 #ifdef __NDS32_PERF_EXT__
4677 #else /* __big_endian__ */
4678 #ifdef __NDS32_PERF_EXT__
4689 #endif /* __big_endian__ */
4707 .size __floatunsidf, .-__floatunsidf
4708 #endif /* L_floatunsidf */
4712 #ifdef L_floatundisf
4714 #ifndef __big_endian__
! __floatundisf(u): convert unsigned 64-bit int to single precision.
4727 .global __floatundisf
4728 .type __floatundisf, @function
! Normalize the value; NORMd is an inline macro.
4740 #NORMd($r2, $r5, P1L)
4751 #ifdef __NDS32_PERF_EXT__
4794 .size __floatundisf, .-__floatundisf
4795 #endif /* L_floatundisf */
4799 #ifdef L_floatundidf
4801 #ifndef __big_endian__
! __floatundidf(u): convert unsigned 64-bit int to double precision.
4818 .global __floatundidf
4819 .type __floatundidf, @function
! Normalize the value; NORMd is an inline macro.
4832 #NORMd($r2, O1H, O1L)
4843 #ifdef __NDS32_PERF_EXT__
4845 #else /* not __NDS32_PERF_EXT__ */
4847 Replace clz with function call.
4852 #ifndef __big_endian__
4865 #endif /* not __NDS32_PERF_EXT__ */
4908 .size __floatundidf, .-__floatundidf
4909 #endif /* L_floatundidf */
! Single-precision comparisons. All seven entry points share one body:
! they return the standard libgcc three-way value (negative when a<b,
! zero when a==b, positive when a>b), with NaN handling appropriate to
! each predicate — TODO confirm NaN result per entry from the full file.
4918 .type __cmpsf2, @function
4921 .type __eqsf2, @function
4924 .type __ltsf2, @function
4927 .type __lesf2, @function
4930 .type __nesf2, @function
4936 .type __gesf2, @function
4939 .type __gtsf2, @function
! 0xff000000 = single exponent+quiet-bit mask (operand shifted left 1),
! used for Inf/NaN detection.
4949 move $r5, #0xff000000
4984 .size __cmpsf2, .-__cmpsf2
4985 .size __eqsf2, .-__eqsf2
4986 .size __ltsf2, .-__ltsf2
4987 .size __lesf2, .-__lesf2
4988 .size __nesf2, .-__nesf2
4989 .size __gesf2, .-__gesf2
4990 .size __gtsf2, .-__gtsf2
4991 #endif /* L_compare_sf */
4997 #ifdef __big_endian__
! Double-precision comparisons. All seven entry points share one body
! returning a libgcc three-way value: 0 when a==b, a negative value
! when a<b, a positive value when a>b (the (~yh) / (yh|1) / (ahigh|1)
! tricks below manufacture values of the right sign without branches).
5016 .type __gtdf2, @function
5017 .type __gedf2, @function
5018 .type __ltdf2, @function
5019 .type __ledf2, @function
5020 .type __eqdf2, @function
5021 .type __nedf2, @function
5022 .type __cmpdf2, @function
5035 #if defined (__NDS32_ISA_V3M__)
! V3M lacks some ALU forms, so extra scratch registers are saved.
5038 smw.adm $r6, [$sp], $r9, 0
! Split both operands into exponent and mantissa-high fields.
5042 and $r6, P1H, $r5 ! r6=aExp
5043 and $r7, P2H, $r5 ! r7=bExp
5044 slli $r8, P1H, 12 ! r8=aSig0
5045 slli $r9, P2H, 12 ! r9=bSig0
! Inf/NaN screening: exponent field all-ones (0x7ff).
5046 beq $r6, $r5, .L11 ! aExp==0x7ff
5047 beq $r7, $r5, .L12 ! bExp==0x7ff
5049 slli $ta, P1H, 1 ! ta=ahigh<<1
5051 xor $r5, P1H, P2H ! r5=ahigh^bhigh
5052 beqz $ta, .L3 ! if(ahigh<<1)==0,go .L3
5053 !-------------------------------
5054 ! (ahigh<<1)!=0 || (bhigh<<1)!=0
5055 !-------------------------------
5057 beqz $r5, .L5 ! ahigh==bhigh, go .L5
5058 !--------------------
5060 !--------------------
5062 bltz $r5, .L7 ! if(aSign!=bSign), go .L7
5063 !--------------------
5065 !--------------------
! Same sign: magnitude comparison, exponent first, then mantissa
! words from most to least significant.
5066 slt $ta, $r6, $r7 ! ta=(aExp<bExp)
5067 bne $r6, $r7, .L8 ! if(aExp!=bExp),go .L8
5068 slt $ta, $r8, $r9 ! ta=(aSig0<bSig0)
5069 bne $r8, $r9, .L8 ! if(aSig0!=bSig0),go .L8
5070 slt $ta, P1L, P2L ! ta=(aSig1<bSig1)
5072 beqz $ta, .L10 ! if(|a|>|b|), go .L10
5073 nor $r0, P2H, P2H ! if(|a|<|b|),return (~yh)
5075 #if defined (__NDS32_ISA_V3M__)
5078 lmw.bim $r6, [$sp], $r9, 0
5082 ori $r0, P2H, 1 ! return (yh|1)
5084 !--------------------
5086 !--------------------
5088 slli $ta, P2H, 1 ! ta=bhigh<<1
5090 bnez $ta, .L4 ! ta=(bhigh<<1)!=0,go .L4
! Both high words are +/-0 pattern: compare low words for equality
! (handles -0.0 == +0.0).
5092 xor $ta, P1L, P2L ! ta=alow^blow
5093 bnez $ta, .L6 ! alow!=blow,go .L6
5094 movi $r0, 0 ! a==b, return 0
5096 !--------------------
5098 !--------------------
! a has exponent 0x7ff: NaN when any mantissa bit is set.
5100 or P1L, P1L, $r8 ! x1=(aSig0|aSig1)
5101 bnez P1L, .L13 ! if(a=nan), go.L13
5102 xor $ta, $r7, $r5 ! ta=(bExp^0x7ff)
5103 bnez $ta, .L2 ! if(bExp!=0x7ff), go .L2
5104 !--------------------
5106 !--------------------
5108 or $ta, P2L, $r9 ! ta=(bSig0|bSig1)
5109 beqz $ta, .L2 ! if(b!=nan), go .L2
5113 !--------------------
5115 !--------------------
5117 ori $r0, P1H, 1 ! if(aSign!=bSign), return (ahigh|1)
5120 .size __gtdf2, .-__gtdf2
5121 .size __gedf2, .-__gedf2
5122 .size __ltdf2, .-__ltdf2
5123 .size __ledf2, .-__ledf2
5124 .size __eqdf2, .-__eqdf2
5125 .size __nedf2, .-__nedf2
5126 .size __cmpdf2, .-__cmpdf2
5127 #endif /* L_compare_df */
! __unordsf2(a, b): return nonzero iff either single-precision operand
! is NaN (exponent all ones with nonzero mantissa; the shifted mask
! 0xff000000 screens the exponent of an operand shifted left by 1).
5136 .type __unordsf2, @function
5141 move $r3, #0xff000000
5148 move $r3, #0xff000000
5159 .size __unordsf2, .-__unordsf2
5160 #endif /* L_unord_sf */
5166 #ifndef __big_endian__
! __unorddf2(a, b): return nonzero iff either double-precision operand
! is NaN (the shifted mask 0xffe00000 screens the 11-bit exponent of a
! high word shifted left by 1).
5180 .type __unorddf2, @function
5188 move $r5, #0xffe00000
5198 move $r5, #0xffe00000
5209 .size __unorddf2, .-__unorddf2
5210 #endif /* L_unord_df */
5211 /* ------------------------------------------- */
5212 /* DPBIT floating point operations for libgcc */
5213 /* ------------------------------------------- */