1 /* TCC runtime library.
2 Parts of this code are (c) 2002 Fabrice Bellard
4 Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc.
6 This file is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any later version.
11 In addition to the permissions in the GNU General Public License, the
12 Free Software Foundation gives you unlimited permission to link the
13 compiled version of this file into combinations with other programs,
14 and to distribute those combinations without any restriction coming
15 from the use of this file. (The General Public License restrictions
16 do apply in other respects; for example, they cover modification of
17 the file, and distribution when not linked into a combined executable.)
20 This file is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; see the file COPYING. If not, write to
27 the Free Software Foundation, 59 Temple Place - Suite 330,
28 Boston, MA 02111-1307, USA.
#define W_TYPE_SIZE   32
#define BITS_PER_UNIT 8

/* Word / double-word types used by the 64-bit helper routines below. */
typedef int Wtype;
typedef unsigned int UWtype;
typedef unsigned int USItype;
typedef long long DWtype;
typedef unsigned long long UDWtype;

/* View a 64-bit value either whole (.ll) or as its two 32-bit halves
   (.s.low / .s.high, little-endian word order as on x86). */
typedef union {
    struct { Wtype low, high; } s;
    DWtype ll;
} DWunion;

/* Wide type for intermediate float computations: x87 extended precision,
   so every unsigned 64-bit integer converts exactly. */
typedef long double XFtype;
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
/* the following deal with IEEE single-precision numbers */
#define EXCESS		126
#define SIGNBIT		0x80000000
#define HIDDEN		(1 << 23)
#define SIGN(fp)	((fp) & SIGNBIT)
#define EXP(fp)		(((fp) >> 23) & 0xFF)
#define MANT(fp)	(((fp) & 0x7FFFFF) | HIDDEN)
#define PACK(s,e,m)	((s) | ((e) << 23) | (m))

/* the following deal with IEEE double-precision numbers */
#define EXCESSD		1022
#define HIDDEND		(1 << 20)
#define EXPD(fp)	(((fp.l.upper) >> 20) & 0x7FF)
#define SIGND(fp)	((fp.l.upper) & SIGNBIT)
#define MANTD(fp)	(((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
			 (fp.l.lower >> 22))
#define HIDDEND_LL	((long long)1 << 52)
#define MANTD_LL(fp)	((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL)
#define PACKD_LL(s,e,m)	(((long long)((s)+((e)<<20))<<32)|(m))

/* the following deal with x86 long double-precision numbers */
#define EXCESSLD	16382
#define EXPLD(fp)	(fp.l.upper & 0x7fff)
#define SIGNLD(fp)	((fp.l.upper) & 0x8000)
/* x86 extended precision: 64-bit mantissa in `lower', sign and 15-bit
   exponent in `upper'.  Field layout matches EXPLD/SIGNLD above. */
union ldouble_long {
    long double ld;
    struct {
        unsigned long long lower;
        unsigned short upper;
    } l;
};

/* IEEE double viewed whole (.ll) or as two 32-bit words
   (little-endian word order, as expected by EXPD/SIGND/MANTD). */
union double_long {
    double d;
    struct {
        unsigned int lower;
        int upper;
    } l;
    long long ll;
};

/* IEEE single viewed as its 32-bit pattern. */
union float_long {
    float f;
    unsigned int l;
};
109 /* XXX: we don't support several builtin supports for now */
110 #if !defined __x86_64__ && !defined __arm__ && !defined __riscv
112 /* XXX: use gcc/tcc intrinsic ? */
114 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
115 __asm__ ("subl %5,%1\n\tsbbl %3,%0" \
116 : "=r" ((USItype) (sh)), \
117 "=&r" ((USItype) (sl)) \
118 : "0" ((USItype) (ah)), \
119 "g" ((USItype) (bh)), \
120 "1" ((USItype) (al)), \
121 "g" ((USItype) (bl)))
122 #define umul_ppmm(w1, w0, u, v) \
124 : "=a" ((USItype) (w0)), \
125 "=d" ((USItype) (w1)) \
126 : "%0" ((USItype) (u)), \
127 "rm" ((USItype) (v)))
128 #define udiv_qrnnd(q, r, n1, n0, dv) \
130 : "=a" ((USItype) (q)), \
131 "=d" ((USItype) (r)) \
132 : "0" ((USItype) (n0)), \
133 "1" ((USItype) (n1)), \
134 "rm" ((USItype) (dv)))
135 #define count_leading_zeros(count, x) \
138 __asm__ ("bsrl %1,%0" \
139 : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
140 (count) = __cbtmp ^ 31; \
143 #error unsupported CPU type
146 /* most of this code is taken from libgcc2.c from gcc */
148 static UDWtype
__udivmoddi4 (UDWtype n
, UDWtype d
, UDWtype
*rp
)
153 UWtype d0
, d1
, n0
, n1
, n2
;
165 #if !defined(UDIV_NEEDS_NORMALIZATION)
172 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
175 /* Remainder in n0. */
182 d0
= 1 / d0
; /* Divide intentionally by zero. */
184 udiv_qrnnd (q1
, n1
, 0, n1
, d0
);
185 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
187 /* Remainder in n0. */
198 #else /* UDIV_NEEDS_NORMALIZATION */
206 count_leading_zeros (bm
, d0
);
210 /* Normalize, i.e. make the most significant bit of the
214 n1
= (n1
<< bm
) | (n0
>> (W_TYPE_SIZE
- bm
));
218 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
221 /* Remainder in n0 >> bm. */
228 d0
= 1 / d0
; /* Divide intentionally by zero. */
230 count_leading_zeros (bm
, d0
);
234 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
235 conclude (the most significant bit of n1 is set) /\ (the
236 leading quotient digit q1 = 1).
238 This special case is necessary, not an optimization.
239 (Shifts counts of W_TYPE_SIZE are undefined.) */
248 b
= W_TYPE_SIZE
- bm
;
252 n1
= (n1
<< bm
) | (n0
>> b
);
255 udiv_qrnnd (q1
, n1
, n2
, n1
, d0
);
260 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
262 /* Remainder in n0 >> bm. */
272 #endif /* UDIV_NEEDS_NORMALIZATION */
283 /* Remainder in n1n0. */
295 count_leading_zeros (bm
, d1
);
298 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
299 conclude (the most significant bit of n1 is set) /\ (the
300 quotient digit q0 = 0 or 1).
302 This special case is necessary, not an optimization. */
304 /* The condition on the next line takes advantage of that
305 n1 >= d1 (true due to program flow). */
306 if (n1
> d1
|| n0
>= d0
)
309 sub_ddmmss (n1
, n0
, n1
, n0
, d1
, d0
);
328 b
= W_TYPE_SIZE
- bm
;
330 d1
= (d1
<< bm
) | (d0
>> b
);
333 n1
= (n1
<< bm
) | (n0
>> b
);
336 udiv_qrnnd (q0
, n1
, n2
, n1
, d1
);
337 umul_ppmm (m1
, m0
, q0
, d0
);
339 if (m1
> n1
|| (m1
== n1
&& m0
> n0
))
342 sub_ddmmss (m1
, m0
, m1
, m0
, d1
, d0
);
347 /* Remainder in (n1n0 - m1m0) >> bm. */
350 sub_ddmmss (n1
, n0
, n1
, n0
, m1
, m0
);
351 rr
.s
.low
= (n1
<< b
) | (n0
>> bm
);
352 rr
.s
.high
= n1
>> bm
;
364 #define __negdi2(a) (-(a))
366 long long __divdi3(long long u
, long long v
)
377 uu
.ll
= __negdi2 (uu
.ll
);
381 vv
.ll
= __negdi2 (vv
.ll
);
383 w
= __udivmoddi4 (uu
.ll
, vv
.ll
, (UDWtype
*) 0);
389 long long __moddi3(long long u
, long long v
)
400 uu
.ll
= __negdi2 (uu
.ll
);
403 vv
.ll
= __negdi2 (vv
.ll
);
405 __udivmoddi4 (uu
.ll
, vv
.ll
, (UDWtype
*) &w
);
411 unsigned long long __udivdi3(unsigned long long u
, unsigned long long v
)
413 return __udivmoddi4 (u
, v
, (UDWtype
*) 0);
416 unsigned long long __umoddi3(unsigned long long u
, unsigned long long v
)
420 __udivmoddi4 (u
, v
, &w
);
424 /* XXX: fix tcc's code generator to do this instead */
425 long long __ashrdi3(long long a
, int b
)
431 u
.s
.low
= u
.s
.high
>> (b
- 32);
432 u
.s
.high
= u
.s
.high
>> 31;
434 u
.s
.low
= ((unsigned)u
.s
.low
>> b
) | (u
.s
.high
<< (32 - b
));
435 u
.s
.high
= u
.s
.high
>> b
;
443 /* XXX: fix tcc's code generator to do this instead */
444 unsigned long long __lshrdi3(unsigned long long a
, int b
)
450 u
.s
.low
= (unsigned)u
.s
.high
>> (b
- 32);
453 u
.s
.low
= ((unsigned)u
.s
.low
>> b
) | (u
.s
.high
<< (32 - b
));
454 u
.s
.high
= (unsigned)u
.s
.high
>> b
;
462 /* XXX: fix tcc's code generator to do this instead */
463 long long __ashldi3(long long a
, int b
)
469 u
.s
.high
= (unsigned)u
.s
.low
<< (b
- 32);
472 u
.s
.high
= ((unsigned)u
.s
.high
<< b
) | ((unsigned)u
.s
.low
>> (32 - b
));
473 u
.s
.low
= (unsigned)u
.s
.low
<< b
;
481 #endif /* !__x86_64__ */
483 /* XXX: fix tcc's code generator to do this instead */
484 float __floatundisf(unsigned long long a
)
490 if (uu
.s
.high
>= 0) {
494 r
+= 18446744073709551616.0;
499 double __floatundidf(unsigned long long a
)
505 if (uu
.s
.high
>= 0) {
506 return (double)uu
.ll
;
509 r
+= 18446744073709551616.0;
514 long double __floatundixf(unsigned long long a
)
520 if (uu
.s
.high
>= 0) {
521 return (long double)uu
.ll
;
524 r
+= 18446744073709551616.0;
525 return (long double)r
;
529 unsigned long long __fixunssfdi (float a1
)
531 register union float_long fl1
;
533 register unsigned long long l
;
540 exp
= EXP (fl1
.l
) - EXCESS
- 24;
552 l
= (unsigned long long)-l
;
/* Convert float to signed 64-bit: convert the magnitude with
   __fixunssfdi, then restore the sign. */
long long __fixsfdi (float a1)
{
    long long ret; int s;
    ret = __fixunssfdi((s = a1 >= 0) ? a1 : -a1);
    return s ? ret : -ret;
}
563 unsigned long long __fixunsdfdi (double a1
)
565 register union double_long dl1
;
567 register unsigned long long l
;
574 exp
= EXPD (dl1
) - EXCESSD
- 53;
578 return 1ULL << 63; /* overflow result (like gcc, somewhat) */
586 l
= (unsigned long long)-l
;
/* Convert double to signed 64-bit: convert the magnitude with
   __fixunsdfdi, then restore the sign. */
long long __fixdfdi (double a1)
{
    long long ret; int s;
    ret = __fixunsdfdi((s = a1 >= 0) ? a1 : -a1);
    return s ? ret : -ret;
}
598 unsigned long long __fixunsxfdi (long double a1
)
600 register union ldouble_long dl1
;
602 register unsigned long long l
;
606 if (dl1
.l
.lower
== 0 && dl1
.l
.upper
== 0)
609 exp
= EXPLD (dl1
) - EXCESSLD
- 64;
617 l
= (unsigned long long)-l
;
/* Convert long double to signed 64-bit: convert the magnitude with
   __fixunsxfdi, then restore the sign. */
long long __fixxfdi (long double a1)
{
    long long ret; int s;
    ret = __fixunsxfdi((s = a1 >= 0) ? a1 : -a1);
    return s ? ret : -ret;
}
#if defined __x86_64__
/* float constants used for unary minus operation */
const float __mzerosf = -0.0;
const double __mzerodf = -0.0;
#endif
636 /* MSVC x64 intrinsic */
637 void __faststorefence(void)
639 __asm__("lock; orl $0,(%rsp)");