/* TCC runtime library.
   Parts of this code are (c) 2002 Fabrice Bellard

   Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
*/

#include <stdint.h>

#define W_TYPE_SIZE   32
#define BITS_PER_UNIT 8

typedef int Wtype;
typedef unsigned int UWtype;
typedef unsigned int USItype;
typedef long long DWtype;
typedef unsigned long long UDWtype;

struct DWstruct {
    Wtype low, high;
};

typedef union
{
    struct DWstruct s;
    DWtype ll;
} DWunion;

typedef long double XFtype;
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
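
/* A DWunion lets the 64-bit helpers below view a long long either as a whole
   (ll) or as its two 32-bit halves (s.low, s.high).  DWstruct puts the low
   word first, i.e. it assumes a little-endian word order, which holds for the
   i386/x86-64/ARM targets this file is built for. */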

/* the following deal with IEEE single-precision numbers */
#define EXCESS        126
#define SIGNBIT       0x80000000
#define HIDDEN        (1 << 23)
#define SIGN(fp)      ((fp) & SIGNBIT)
#define EXP(fp)       (((fp) >> 23) & 0xFF)
#define MANT(fp)      (((fp) & 0x7FFFFF) | HIDDEN)
#define PACK(s,e,m)   ((s) | ((e) << 23) | (m))

/* the following deal with IEEE double-precision numbers */
#define EXCESSD       1022
#define HIDDEND       (1 << 20)
#define EXPD(fp)      (((fp.l.upper) >> 20) & 0x7FF)
#define SIGND(fp)     ((fp.l.upper) & SIGNBIT)
#define MANTD(fp)     (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
                       (fp.l.lower >> 22))
#define HIDDEND_LL    ((long long)1 << 52)
#define MANTD_LL(fp)  ((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL)
#define PACKD_LL(s,e,m) (((long long)((s)+((e)<<20))<<32)|(m))
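
/* IEEE-754 layout assumed by the macros above:
   single precision: 1 sign bit, 8 exponent bits (bias 127), 23 mantissa bits;
   double precision: 1 sign bit, 11 exponent bits (bias 1023), 52 mantissa bits.
   HIDDEN/HIDDEND restore the implicit leading 1 of the mantissa.  EXCESS and
   EXCESSD are the exponent bias minus one; together with the mantissa width
   (24 and 53 bits including the hidden bit) they give the shift amounts used
   by the __fixuns* conversions below. */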

/* the following deal with x86 long double-precision numbers */
#define EXCESSLD      16382
#define EXPLD(fp)     (fp.l.upper & 0x7fff)
#define SIGNLD(fp)    ((fp.l.upper) & 0x8000)
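
/* x86 80-bit extended precision: 1 sign bit, 15 exponent bits (bias 16383)
   and a 64-bit mantissa whose leading integer bit is stored explicitly, so no
   HIDDEN constant is needed.  The unions below overlay each floating-point
   type with integers so these bit fields can be picked apart; the #if 1
   variant of double_long stores the low word first, matching the
   little-endian targets. */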

/* only for x86 */
union ldouble_long {
    long double ld;
    struct {
        unsigned long long lower;
        unsigned short upper;
    } l;
};

union double_long {
    double d;
#if 1
    struct {
        unsigned int lower;
        int upper;
    } l;
#else
    struct {
        int upper;
        unsigned int lower;
    } l;
#endif
    long long ll;
};

union float_long {
    float f;
    long l;
};

/* XXX: several builtins are not supported for now */
#if !defined(TCC_TARGET_X86_64) && !defined(TCC_TARGET_ARM)

/* XXX: use gcc/tcc intrinsics? */
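
/* Double-word arithmetic primitives in the style of gcc's longlong.h:
   sub_ddmmss(sh, sl, ah, al, bh, bl)  -- (sh:sl) = (ah:al) - (bh:bl) with borrow
   umul_ppmm(w1, w0, u, v)             -- full 32x32 -> 64 multiply, high word in w1
   udiv_qrnnd(q, r, n1, n0, d)         -- divide the two-word value n1:n0 by d
                                          (requires n1 < d), quotient q, remainder r
   count_leading_zeros(count, x)       -- number of leading zero bits of x (x != 0) */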
#if defined(TCC_TARGET_I386)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1\n\tsbbl %3,%0"      \
           : "=r" ((USItype) (sh)),        \
             "=&r" ((USItype) (sl))        \
           : "0" ((USItype) (ah)),         \
             "g" ((USItype) (bh)),         \
             "1" ((USItype) (al)),         \
             "g" ((USItype) (bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3"                       \
           : "=a" ((USItype) (w0)),        \
             "=d" ((USItype) (w1))         \
           : "%0" ((USItype) (u)),         \
             "rm" ((USItype) (v)))
#define udiv_qrnnd(q, r, n1, n0, dv) \
  __asm__ ("divl %4"                       \
           : "=a" ((USItype) (q)),         \
             "=d" ((USItype) (r))          \
           : "0" ((USItype) (n0)),         \
             "1" ((USItype) (n1)),         \
             "rm" ((USItype) (dv)))
#define count_leading_zeros(count, x) \
  do {                                     \
    USItype __cbtmp;                       \
    __asm__ ("bsrl %1,%0"                  \
             : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
    (count) = __cbtmp ^ 31;                \
  } while (0)
#else
#error unsupported CPU type
#endif

/* most of this code is taken from libgcc2.c from gcc */
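
/* 64-bit unsigned divide with remainder: returns n / d and, when rp is
   non-NULL, stores n % d through rp.  The quotient is built from 32-bit
   udiv_qrnnd steps; operands are normalized where needed (divisor shifted
   left until its top bit is set) so that each udiv_qrnnd quotient fits in
   32 bits. */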
static UDWtype __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
{
    DWunion ww;
    DWunion nn, dd;
    DWunion rr;
    UWtype d0, d1, n0, n1, n2;
    UWtype q0, q1;
    UWtype b, bm;

    nn.ll = n;
    dd.ll = d;

    d0 = dd.s.low;
    d1 = dd.s.high;
    n0 = nn.s.low;
    n1 = nn.s.high;

#if !defined(UDIV_NEEDS_NORMALIZATION)
    if (d1 == 0)
    {
        if (d0 > n1)
        {
            /* 0q = nn / 0D */
            udiv_qrnnd (q0, n0, n1, n0, d0);
            q1 = 0;
            /* Remainder in n0. */
        }
        else
        {
            /* qq = NN / 0d */
            if (d0 == 0)
                d0 = 1 / d0;    /* Divide intentionally by zero. */

            udiv_qrnnd (q1, n1, 0, n1, d0);
            udiv_qrnnd (q0, n0, n1, n0, d0);
            /* Remainder in n0. */
        }

        if (rp != 0)
        {
            rr.s.low = n0;
            rr.s.high = 0;
            *rp = rr.ll;
        }
    }

#else /* UDIV_NEEDS_NORMALIZATION */

    if (d1 == 0)
    {
        if (d0 > n1)
        {
            /* 0q = nn / 0D */
            count_leading_zeros (bm, d0);

            if (bm != 0)
            {
                /* Normalize, i.e. make the most significant bit of the
                   denominator set. */
                d0 = d0 << bm;
                n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
                n0 = n0 << bm;
            }

            udiv_qrnnd (q0, n0, n1, n0, d0);
            q1 = 0;
            /* Remainder in n0 >> bm. */
        }
        else
        {
            /* qq = NN / 0d */
            if (d0 == 0)
                d0 = 1 / d0;    /* Divide intentionally by zero. */

            count_leading_zeros (bm, d0);

            if (bm == 0)
            {
                /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
                   conclude (the most significant bit of n1 is set) /\ (the
                   leading quotient digit q1 = 1).

                   This special case is necessary, not an optimization.
                   (Shifts counts of W_TYPE_SIZE are undefined.) */
                n1 -= d0;
                q1 = 1;
            }
            else
            {
                /* Normalize. */
                b = W_TYPE_SIZE - bm;

                d0 = d0 << bm;
                n2 = n1 >> b;
                n1 = (n1 << bm) | (n0 >> b);
                n0 = n0 << bm;

                udiv_qrnnd (q1, n1, n2, n1, d0);
            }

            /* n1 != d0... */
            udiv_qrnnd (q0, n0, n1, n0, d0);
            /* Remainder in n0 >> bm. */
        }

        if (rp != 0)
        {
            rr.s.low = n0 >> bm;
            rr.s.high = 0;
            *rp = rr.ll;
        }
    }
#endif /* UDIV_NEEDS_NORMALIZATION */
    else
    {
        if (d1 > n1)
        {
            /* 00 = nn / DD */
            q0 = 0;
            q1 = 0;

            /* Remainder in n1n0. */
            if (rp != 0)
            {
                rr.s.low = n0;
                rr.s.high = n1;
                *rp = rr.ll;
            }
        }
        else
        {
            /* 0q = NN / dd */
            count_leading_zeros (bm, d1);
            if (bm == 0)
            {
                /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
                   conclude (the most significant bit of n1 is set) /\ (the
                   quotient digit q0 = 0 or 1).

                   This special case is necessary, not an optimization. */

                /* The condition on the next line takes advantage of that
                   n1 >= d1 (true due to program flow). */
                if (n1 > d1 || n0 >= d0)
                {
                    q0 = 1;
                    sub_ddmmss (n1, n0, n1, n0, d1, d0);
                }
                else
                    q0 = 0;

                q1 = 0;

                if (rp != 0)
                {
                    rr.s.low = n0;
                    rr.s.high = n1;
                    *rp = rr.ll;
                }
            }
            else
            {
                UWtype m1, m0;
                /* Normalize. */
                b = W_TYPE_SIZE - bm;

                d1 = (d1 << bm) | (d0 >> b);
                d0 = d0 << bm;
                n2 = n1 >> b;
                n1 = (n1 << bm) | (n0 >> b);
                n0 = n0 << bm;

                udiv_qrnnd (q0, n1, n2, n1, d1);
                umul_ppmm (m1, m0, q0, d0);

                if (m1 > n1 || (m1 == n1 && m0 > n0))
                {
                    q0--;
                    sub_ddmmss (m1, m0, m1, m0, d1, d0);
                }

                q1 = 0;

                /* Remainder in (n1n0 - m1m0) >> bm. */
                if (rp != 0)
                {
                    sub_ddmmss (n1, n0, n1, n0, m1, m0);
                    rr.s.low = (n1 << b) | (n0 >> bm);
                    rr.s.high = n1 >> bm;
                    *rp = rr.ll;
                }
            }
        }
    }

    ww.s.low = q0;
    ww.s.high = q1;
    return ww.ll;
}

#define __negdi2(a) (-(a))
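
/* Signed 64-bit divide/modulo: operate on absolute values with __udivmoddi4
   and fix up the sign afterwards.  Per C semantics the quotient is negative
   iff the operand signs differ, while the remainder takes the sign of the
   dividend. */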

long long __divdi3(long long u, long long v)
{
    int c = 0;
    DWunion uu, vv;
    DWtype w;

    uu.ll = u;
    vv.ll = v;

    if (uu.s.high < 0) {
        c = ~c;
        uu.ll = __negdi2 (uu.ll);
    }
    if (vv.s.high < 0) {
        c = ~c;
        vv.ll = __negdi2 (vv.ll);
    }
    w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
    if (c)
        w = __negdi2 (w);
    return w;
}

long long __moddi3(long long u, long long v)
{
    int c = 0;
    DWunion uu, vv;
    DWtype w;

    uu.ll = u;
    vv.ll = v;

    if (uu.s.high < 0) {
        c = ~c;
        uu.ll = __negdi2 (uu.ll);
    }
    if (vv.s.high < 0)
        vv.ll = __negdi2 (vv.ll);

    __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) &w);
    if (c)
        w = __negdi2 (w);
    return w;
}

unsigned long long __udivdi3(unsigned long long u, unsigned long long v)
{
    return __udivmoddi4 (u, v, (UDWtype *) 0);
}

unsigned long long __umoddi3(unsigned long long u, unsigned long long v)
{
    UDWtype w;
    __udivmoddi4 (u, v, &w);
    return w;
}
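
/* 64-bit shifts built from 32-bit operations on the two halves of a DWunion:
   shifts of 32 or more move (part of) one half into the other, shorter shifts
   combine bits from both halves.  A count of 0 is handled separately because
   shifting a 32-bit word by 32 is undefined behaviour. */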

/* XXX: fix tcc's code generator to do this instead */
long long __ashrdi3(long long a, int b)
{
#ifdef __TINYC__
    DWunion u;
    u.ll = a;
    if (b >= 32) {
        u.s.low = u.s.high >> (b - 32);
        u.s.high = u.s.high >> 31;
    } else if (b != 0) {
        u.s.low = ((unsigned)u.s.low >> b) | (u.s.high << (32 - b));
        u.s.high = u.s.high >> b;
    }
    return u.ll;
#else
    return a >> b;
#endif
}

/* XXX: fix tcc's code generator to do this instead */
unsigned long long __lshrdi3(unsigned long long a, int b)
{
#ifdef __TINYC__
    DWunion u;
    u.ll = a;
    if (b >= 32) {
        u.s.low = (unsigned)u.s.high >> (b - 32);
        u.s.high = 0;
    } else if (b != 0) {
        u.s.low = ((unsigned)u.s.low >> b) | (u.s.high << (32 - b));
        u.s.high = (unsigned)u.s.high >> b;
    }
    return u.ll;
#else
    return a >> b;
#endif
}

/* XXX: fix tcc's code generator to do this instead */
long long __ashldi3(long long a, int b)
{
#ifdef __TINYC__
    DWunion u;
    u.ll = a;
    if (b >= 32) {
        u.s.high = (unsigned)u.s.low << (b - 32);
        u.s.low = 0;
    } else if (b != 0) {
        u.s.high = ((unsigned)u.s.high << b) | ((unsigned)u.s.low >> (32 - b));
        u.s.low = (unsigned)u.s.low << b;
    }
    return u.ll;
#else
    return a << b;
#endif
}

#ifndef COMMIT_4ad186c5ef61_IS_FIXED
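/* Convert a long double to a 64-bit integer with C truncation semantics:
   fnstcw/fldcw temporarily set the x87 rounding-control bits (mask 0x0C00)
   to "round toward zero" so that fistpll (which stores st(0) as a 64-bit
   integer and pops it) truncates instead of using the current rounding mode,
   then the original control word is restored. */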
long long __tcc_cvt_ftol(long double x)
{
    unsigned c0, c1;
    long long ret;
    __asm__ __volatile__ ("fnstcw %0" : "=m" (c0));
    c1 = c0 | 0x0C00;
    __asm__ __volatile__ ("fldcw %0" : : "m" (c1));
    __asm__ __volatile__ ("fistpll %0" : "=m" (ret));
    __asm__ __volatile__ ("fldcw %0" : : "m" (c0));
    return ret;
}
#endif

#endif /* !__x86_64__ */
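
/* Unsigned 64-bit to floating-point conversions.  The value is first
   converted as a signed long long; when the top bit is set that signed
   conversion sees the value minus 2^64, so 2^64 (18446744073709551616.0) is
   added back.  The intermediate uses long double (XFtype), whose 64-bit
   mantissa represents every 64-bit integer exactly, before the final
   narrowing conversion. */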

/* XXX: fix tcc's code generator to do this instead */
float __floatundisf(unsigned long long a)
{
    DWunion uu;
    XFtype r;

    uu.ll = a;
    if (uu.s.high >= 0) {
        return (float)uu.ll;
    } else {
        r = (XFtype)uu.ll;
        r += 18446744073709551616.0;
        return (float)r;
    }
}

double __floatundidf(unsigned long long a)
{
    DWunion uu;
    XFtype r;

    uu.ll = a;
    if (uu.s.high >= 0) {
        return (double)uu.ll;
    } else {
        r = (XFtype)uu.ll;
        r += 18446744073709551616.0;
        return (double)r;
    }
}

long double __floatundixf(unsigned long long a)
{
    DWunion uu;
    XFtype r;

    uu.ll = a;
    if (uu.s.high >= 0) {
        return (long double)uu.ll;
    } else {
        r = (XFtype)uu.ll;
        r += 18446744073709551616.0;
        return (long double)r;
    }
}
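
/* Floating-point to unsigned 64-bit conversions: extract the exponent and the
   mantissa (with the hidden bit restored), then shift the integer mantissa
   left or right by exponent - bias - mantissa-width.  Values too large to
   represent saturate to (unsigned long long)-1, values below 1 convert to 0. */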

unsigned long long __fixunssfdi (float a1)
{
    register union float_long fl1;
    register int exp;
    register unsigned long l;

    fl1.f = a1;

    if (fl1.l == 0)
        return (0);

    exp = EXP (fl1.l) - EXCESS - 24;

    l = MANT(fl1.l);
    if (exp >= 41)
        return (unsigned long long)-1;
    else if (exp >= 0)
        return (unsigned long long)l << exp;
    else if (exp >= -23)
        return l >> -exp;
    else
        return 0;
}

unsigned long long __fixunsdfdi (double a1)
{
    register union double_long dl1;
    register int exp;
    register unsigned long long l;

    dl1.d = a1;

    if (dl1.ll == 0)
        return (0);

    exp = EXPD (dl1) - EXCESSD - 53;

    l = MANTD_LL(dl1);

    if (exp >= 12)
        return (unsigned long long)-1;
    else if (exp >= 0)
        return l << exp;
    else if (exp >= -52)
        return l >> -exp;
    else
        return 0;
}

unsigned long long __fixunsxfdi (long double a1)
{
    register union ldouble_long dl1;
    register int exp;
    register unsigned long long l;

    dl1.ld = a1;

    if (dl1.l.lower == 0 && dl1.l.upper == 0)
        return (0);

    exp = EXPLD (dl1) - EXCESSLD - 64;

    l = dl1.l.lower;

    if (exp > 0)
        return (unsigned long long)-1;
    else if (exp >= -63)
        return l >> -exp;
    else
        return 0;
}
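
/* The signed versions below convert the absolute value and then restore the
   sign of the argument. */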

long long __fixsfdi (float a1)
{
    long long ret; int s;
    ret = __fixunssfdi((s = a1 >= 0) ? a1 : -a1);
    return s ? ret : -ret;
}

long long __fixdfdi (double a1)
{
    long long ret; int s;
    ret = __fixunsdfdi((s = a1 >= 0) ? a1 : -a1);
    return s ? ret : -ret;
}

long long __fixxfdi (long double a1)
{
    long long ret; int s;
    ret = __fixunsxfdi((s = a1 >= 0) ? a1 : -a1);
    return s ? ret : -ret;
}

#if defined(TCC_TARGET_X86_64) && !defined(_WIN64)

#ifndef __TINYC__
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#else
/* Avoid including stdlib.h because it is not easily available when
   cross-compiling */
#include <stddef.h> /* size_t definition is needed for an x86_64-tcc to parse memset() */
extern void *malloc(unsigned long long);
extern void *memset(void *s, int c, size_t n);
extern void free(void*);
extern void abort(void);
#endif

enum __va_arg_type {
    __va_gen_reg, __va_float_reg, __va_stack
};

// This should be kept in sync with the declaration in our include/stdarg.h
/* GCC compatible definition of va_list. */
typedef struct {
    unsigned int gp_offset;
    unsigned int fp_offset;
    union {
        unsigned int overflow_offset;
        char *overflow_arg_area;
    };
    char *reg_save_area;
} __va_list_struct;

#undef __va_start
#undef __va_arg
#undef __va_copy
#undef __va_end
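
/* x86-64 SysV varargs support.  The register save area is laid out as in the
   SysV ABI: 6 integer registers (6 * 8 = 48 bytes) followed by 8 SSE
   registers (8 * 16 = 128 bytes), 176 bytes in total.  __va_start assumes
   tcc's prologue for a variadic function has stored the initial
   gp/fp/overflow offsets 16 bytes below the frame pointer and the register
   save area 176 bytes below that; __va_arg then serves arguments from the
   save area until gp_offset reaches 48 (integer) or fp_offset reaches 176
   (floating point), and falls back to the stack overflow area afterwards. */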

void __va_start(__va_list_struct *ap, void *fp)
{
    memset(ap, 0, sizeof(__va_list_struct));
    *ap = *(__va_list_struct *)((char *)fp - 16);
    ap->overflow_arg_area = (char *)fp + ap->overflow_offset;
    ap->reg_save_area = (char *)fp - 176 - 16;
}

void *__va_arg(__va_list_struct *ap,
               enum __va_arg_type arg_type,
               int size, int align)
{
    size = (size + 7) & ~7;
    align = (align + 7) & ~7;
    switch (arg_type) {
    case __va_gen_reg:
        if (ap->gp_offset + size <= 48) {
            ap->gp_offset += size;
            return ap->reg_save_area + ap->gp_offset - size;
        }
        goto use_overflow_area;

    case __va_float_reg:
        if (ap->fp_offset < 128 + 48) {
            ap->fp_offset += 16;
            return ap->reg_save_area + ap->fp_offset - 16;
        }
        size = 8;
        goto use_overflow_area;

    case __va_stack:
    use_overflow_area:
        ap->overflow_arg_area += size;
        ap->overflow_arg_area = (char*)((intptr_t)(ap->overflow_arg_area + align - 1) & -(intptr_t)align);
        return ap->overflow_arg_area - size;

    default:
#ifndef __TINYC__
        fprintf(stderr, "unknown ABI type for __va_arg\n");
#endif
        abort();
    }
}

#endif /* __x86_64__ */

/* Flushing for tccrun */
#if defined(TCC_TARGET_X86_64) || defined(TCC_TARGET_I386)

void __clear_cache(char *beginning, char *end)
{
}

#elif defined(TCC_TARGET_ARM)

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>

void __clear_cache(char *beginning, char *end)
{
/* __ARM_NR_cacheflush is kernel private and should not be used in user space.
 * However, there is no ARM asm parser in tcc so we use it for now */
#if 1
    syscall(__ARM_NR_cacheflush, beginning, end, 0);
#else
    __asm__ ("push {r7}\n\t"
             "mov r7, #0xf0002\n\t"
             "mov r2, #0\n\t"
             "swi 0\n\t"
             "pop {r7}\n\t"
             "ret");
#endif
}

#else
#warning __clear_cache not defined for this architecture, avoid using tcc -run
#endif