1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
22 /* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License. */
29 /* It is incorrect to include config.h here, because this file is being
30 compiled for the target, and hence definitions concerning only the host
35 /* We disable this when inhibit_libc, so that gcc can still be built without
36 needing header files first. */
37 /* ??? This is not a good solution, since prototypes may be required in
38 some cases for correct code. See also frame.c. */
40 /* fixproto guarantees these system headers exist. */
/* Hand-written prototypes for the libc entry points this file needs.
   System headers may be unavailable while gcc itself is being built
   (see the inhibit_libc note above), so these are declared directly.  */
48 extern void *malloc (size_t);
51 extern void free (void *);
54 extern int atexit(void (*)(void));
65 /* Don't use `fancy_abort' here even if config.h says to use it. */
70 /* In a cross-compilation situation, default to inhibiting compilation
71 of routines that use libc. */
73 #if defined(CROSS_COMPILE) && !defined(inhibit_libc)
77 /* Permit the tm.h file to select the endianness to use just for this
78 file. This is used when the endianness is determined when the
81 #ifndef LIBGCC2_WORDS_BIG_ENDIAN
82 #define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
85 #ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
86 #define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
89 /* In the first part of this file, we are interfacing to calls generated
90 by the compiler itself. These calls pass values into these routines
91 which have very specific modes (rather than very specific types), and
92 these compiler-generated calls also expect any return values to have
93 very specific modes (rather than very specific types). Thus, we need
94 to avoid using regular C language type names in this part of the file
95 because the sizes for those types can be configured to be anything.
96 Instead we use the following special type names. */
/* Machine-mode-based type names: QI = 1 byte, SI = one word, DI = two
   words, SF/DF/XF/TF = the float formats.  These pin the sizes to the
   compiler's modes regardless of how the target configures the plain
   C type sizes (see the comment block above).  */
98 typedef unsigned int UQItype
__attribute__ ((mode (QI
)));
99 typedef int SItype
__attribute__ ((mode (SI
)));
100 typedef unsigned int USItype
__attribute__ ((mode (SI
)));
101 typedef int DItype
__attribute__ ((mode (DI
)));
102 typedef unsigned int UDItype
__attribute__ ((mode (DI
)));
104 typedef float SFtype
__attribute__ ((mode (SF
)));
105 typedef float DFtype
__attribute__ ((mode (DF
)));
/* Extended float types only exist for the matching long-double size.
   NOTE(review): the matching #endif lines for these #if blocks appear
   to have been lost in extraction of this listing.  */
107 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
108 typedef float XFtype
__attribute__ ((mode (XF
)));
110 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
111 typedef float TFtype
__attribute__ ((mode (TF
)));
/* A signed integer exactly one machine word wide.  */
114 typedef int word_type
__attribute__ ((mode (__word__
)));
116 /* Make sure that we don't accidentally use any normal C language built-in
117 type names in the first part of this file. Instead we want to use *only*
118 the type names defined above. The following macro definitions ensure
119 that if we *do* accidentally use some normal C language built-in type name,
120 we will get a syntax error. */
122 #define char bogus_type
123 #define short bogus_type
124 #define int bogus_type
125 #define long bogus_type
126 #define unsigned bogus_type
127 #define float bogus_type
128 #define double bogus_type
130 #define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)
132 /* DIstructs are pairs of SItype values in the order determined by
133 LIBGCC2_WORDS_BIG_ENDIAN. */
/* Word-pair layout of a DImode value: member order follows the word
   endianness selected by LIBGCC2_WORDS_BIG_ENDIAN.
   NOTE(review): the #else directive separating the two definitions
   (and the closing #endif) are elided in this listing.  */
135 #if LIBGCC2_WORDS_BIG_ENDIAN
136 struct DIstruct
{SItype high
, low
;};
138 struct DIstruct
{SItype low
, high
;};
141 /* We need this union to unpack/pack DImode values, since we don't have
142 any arithmetic yet. Incoming DImode parameters are stored into the
143 `ll' field, and the unpacked result is read from the struct `s'. */
151 #if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\
152 || defined (L_divdi3) || defined (L_udivdi3) \
153 || defined (L_moddi3) || defined (L_umoddi3))
155 #include "longlong.h"
157 #endif /* udiv or mul */
159 extern DItype
__fixunssfdi (SFtype a
);
160 extern DItype
__fixunsdfdi (DFtype a
);
161 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
162 extern DItype
__fixunsxfdi (XFtype a
);
164 #if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
165 extern DItype
__fixunstfdi (TFtype a
);
168 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
169 #if defined (L_divdi3) || defined (L_moddi3)
/* High-word half of two's-complement DImode negation:
   high = -high - (low != 0), i.e. borrow one when the low word is
   nonzero.  NOTE(review): the function header, the low-word
   assignment and the return are elided in this listing; presumably
   w.s.low = -uu.s.low precedes this line — TODO confirm against the
   original libgcc2.c.  */
181 w
.s
.high
= -uu
.s
.high
- ((USItype
) w
.s
.low
> 0);
187 /* Unless shift functions are defined with full ANSI prototypes,
188 parameter b will be promoted to int if word_type is smaller than an int. */
/* Logical (zero-filling) right shift of a DImode value by b bits.
   NOTE(review): return type, local declarations (DIunion w, uu),
   the b == 0 early-out, branch structure and the return statement
   are elided in this listing; only the word arithmetic survives.  */
191 __lshrdi3 (DItype u
, word_type b
)
/* bm = bits left within one word after shifting by b; bm <= 0 means
   the shift crosses the word boundary.  */
202 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
/* Cross-word case: the low result word is the high input word shifted
   right by (b - word_size); the high result word is presumably zeroed
   in the elided branch — TODO confirm.  */
206 w
.s
.low
= (USItype
)uu
.s
.high
>> -bm
;
/* In-word case: bits shifted out of the high word carry into the low
   word.  */
210 USItype carries
= (USItype
)uu
.s
.high
<< bm
;
211 w
.s
.high
= (USItype
)uu
.s
.high
>> b
;
212 w
.s
.low
= ((USItype
)uu
.s
.low
>> b
) | carries
;
/* Arithmetic/logical left shift of a DImode value by b bits (mirror
   image of __lshrdi3 above).  NOTE(review): return type, locals,
   guards and return statement are elided in this listing.  */
221 __ashldi3 (DItype u
, word_type b
)
/* bm = bits left within one word after shifting by b.  */
232 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
/* Cross-word case (bm <= 0): the high result word is the low input
   word shifted left by (b - word_size); the low result word is
   presumably zeroed in the elided branch — TODO confirm.  */
236 w
.s
.high
= (USItype
)uu
.s
.low
<< -bm
;
/* In-word case: bits shifted out of the low word carry into the high
   word.  */
240 USItype carries
= (USItype
)uu
.s
.low
>> bm
;
241 w
.s
.low
= (USItype
)uu
.s
.low
<< b
;
242 w
.s
.high
= ((USItype
)uu
.s
.high
<< b
) | carries
;
/* Arithmetic (sign-propagating) right shift of a DImode value by b
   bits.  NOTE(review): return type, locals, guards and return are
   elided in this listing.  */
251 __ashrdi3 (DItype u
, word_type b
)
252 bm
= (sizeof (SItype
) * BITS_PER_UNIT
) - b
;
/* Cross-word case: replicate the sign bit of the high word across the
   whole high result word via a signed shift by (word_size - 1).  */
265 /* w.s.high = 1..1 or 0..0 */
266 w
.s
.high
= uu
.s
.high
>> (sizeof (SItype
) * BITS_PER_UNIT
- 1);
267 w
.s
.low
= uu
.s
.high
>> -bm
;
/* In-word case: unsigned carry of the high word's low bits into the
   low result word; the high word itself shifts as signed so the sign
   propagates.  */
271 USItype carries
= (USItype
)uu
.s
.high
<< bm
;
272 w
.s
.high
= uu
.s
.high
>> b
;
273 w
.s
.low
= ((USItype
)uu
.s
.low
>> b
) | carries
;
/* Find-first-set over a DImode value (appears to be the body of
   __ffsdi2 — the function header is elided in this listing): try the
   low word first; otherwise take ffs of the high word and bias the
   result by one word's worth of bits.  NOTE(review): the guard that
   selects between the two branches (presumably uu.s.low != 0) is
   elided — TODO confirm.  */
287 w
.s
.low
= ffs (uu
.s
.low
);
290 w
.s
.low
= ffs (uu
.s
.high
);
293 w
.s
.low
+= BITS_PER_UNIT
* sizeof (SItype
);
/* DImode multiply via schoolbook decomposition: the full low x low
   product comes from __umulsidi3 (SI x SI -> DI), then the two cross
   products land in the high word only (their upper halves overflow
   out of the 64-bit result and are dropped).  NOTE(review): return
   type, locals (uu, vv, w) and return are elided in this listing.  */
302 __muldi3 (DItype u
, DItype v
)
310 w
.ll
= __umulsidi3 (uu
.s
.low
, vv
.s
.low
);
311 w
.s
.high
+= ((USItype
) uu
.s
.low
* (USItype
) vv
.s
.high
312 + (USItype
) uu
.s
.high
* (USItype
) vv
.s
.low
);
319 #if defined (sdiv_qrnnd)
321 __udiv_w_sdiv (USItype
*rp
, USItype a1
, USItype a0
, USItype d
)
328 if (a1
< d
- a1
- (a0
>> (SI_TYPE_SIZE
- 1)))
330 /* dividend, divisor, and quotient are nonnegative */
331 sdiv_qrnnd (q
, r
, a1
, a0
, d
);
335 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
336 sub_ddmmss (c1
, c0
, a1
, a0
, d
>> 1, d
<< (SI_TYPE_SIZE
- 1));
337 /* Divide (c1*2^32 + c0) by d */
338 sdiv_qrnnd (q
, r
, c1
, c0
, d
);
339 /* Add 2^31 to quotient */
340 q
+= (USItype
) 1 << (SI_TYPE_SIZE
- 1);
345 b1
= d
>> 1; /* d/2, between 2^30 and 2^31 - 1 */
346 c1
= a1
>> 1; /* A/2 */
347 c0
= (a1
<< (SI_TYPE_SIZE
- 1)) + (a0
>> 1);
349 if (a1
< b1
) /* A < 2^32*b1, so A/2 < 2^31*b1 */
351 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
353 r
= 2*r
+ (a0
& 1); /* Remainder from A/(2*b1) */
370 else if (c1
< b1
) /* So 2^31 <= (A/2)/b1 < 2^32 */
373 c0
= ~c0
; /* logical NOT */
375 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
377 q
= ~q
; /* (A/2)/b1 */
380 r
= 2*r
+ (a0
& 1); /* A/(2*b1) */
398 else /* Implies c1 = b1 */
399 { /* Hence a1 = d - 1 = 2*b1 - 1 */
417 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
419 __udiv_w_sdiv (USItype
*rp
__attribute__ ((__unused__
)),
420 USItype a1
__attribute__ ((__unused__
)),
421 USItype a0
__attribute__ ((__unused__
)),
422 USItype d
__attribute__ ((__unused__
)))
429 #if (defined (L_udivdi3) || defined (L_divdi3) || \
430 defined (L_umoddi3) || defined (L_moddi3))
/* Lookup table used by count_leading_zeros (longlong.h): entry i is
   the number of bits needed to represent i, i.e. floor(log2(i)) + 1,
   with entry 0 holding 0.  256 entries: one per byte value.
   NOTE(review): the braces opening and closing the initializer are
   elided in this listing.  */
435 static const UQItype __clz_tab
[] =
437 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
438 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
439 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
440 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
441 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
442 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
443 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
444 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
447 #if (defined (L_udivdi3) || defined (L_divdi3) || \
448 defined (L_umoddi3) || defined (L_moddi3))
452 __udivmoddi4 (UDItype n
, UDItype d
, UDItype
*rp
)
457 USItype d0
, d1
, n0
, n1
, n2
;
469 #if !UDIV_NEEDS_NORMALIZATION
476 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
479 /* Remainder in n0. */
486 d0
= 1 / d0
; /* Divide intentionally by zero. */
488 udiv_qrnnd (q1
, n1
, 0, n1
, d0
);
489 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
491 /* Remainder in n0. */
502 #else /* UDIV_NEEDS_NORMALIZATION */
510 count_leading_zeros (bm
, d0
);
514 /* Normalize, i.e. make the most significant bit of the
518 n1
= (n1
<< bm
) | (n0
>> (SI_TYPE_SIZE
- bm
));
522 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
525 /* Remainder in n0 >> bm. */
532 d0
= 1 / d0
; /* Divide intentionally by zero. */
534 count_leading_zeros (bm
, d0
);
538 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
539 conclude (the most significant bit of n1 is set) /\ (the
540 leading quotient digit q1 = 1).
542 This special case is necessary, not an optimization.
543 (Shifts counts of SI_TYPE_SIZE are undefined.) */
552 b
= SI_TYPE_SIZE
- bm
;
556 n1
= (n1
<< bm
) | (n0
>> b
);
559 udiv_qrnnd (q1
, n1
, n2
, n1
, d0
);
564 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
566 /* Remainder in n0 >> bm. */
576 #endif /* UDIV_NEEDS_NORMALIZATION */
587 /* Remainder in n1n0. */
599 count_leading_zeros (bm
, d1
);
602 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
603 conclude (the most significant bit of n1 is set) /\ (the
604 quotient digit q0 = 0 or 1).
606 This special case is necessary, not an optimization. */
608 /* The condition on the next line takes advantage of that
609 n1 >= d1 (true due to program flow). */
610 if (n1
> d1
|| n0
>= d0
)
613 sub_ddmmss (n1
, n0
, n1
, n0
, d1
, d0
);
632 b
= SI_TYPE_SIZE
- bm
;
634 d1
= (d1
<< bm
) | (d0
>> b
);
637 n1
= (n1
<< bm
) | (n0
>> b
);
640 udiv_qrnnd (q0
, n1
, n2
, n1
, d1
);
641 umul_ppmm (m1
, m0
, q0
, d0
);
643 if (m1
> n1
|| (m1
== n1
&& m0
> n0
))
646 sub_ddmmss (m1
, m0
, m1
, m0
, d1
, d0
);
651 /* Remainder in (n1n0 - m1m0) >> bm. */
654 sub_ddmmss (n1
, n0
, n1
, n0
, m1
, m0
);
655 rr
.s
.low
= (n1
<< b
) | (n0
>> bm
);
656 rr
.s
.high
= n1
>> bm
;
/* Signed DImode division: negate negative operands via __negdi2, do
   the unsigned division, then (in elided code) negate the quotient
   when the operand signs differ.  Note the old K&R-style declaration
   of __udivmoddi4 with an empty parameter list.  NOTE(review): the
   sign tests guarding the negations, the locals and the return are
   elided in this listing.  */
670 UDItype
__udivmoddi4 ();
673 __divdi3 (DItype u
, DItype v
)
684 uu
.ll
= __negdi2 (uu
.ll
);
687 vv
.ll
= __negdi2 (vv
.ll
);
/* Quotient only: null remainder pointer.  */
689 w
= __udivmoddi4 (uu
.ll
, vv
.ll
, (UDItype
*) 0);
/* Signed DImode modulus: same negate-then-divide-unsigned scheme as
   __divdi3, but the quotient is discarded and the remainder is
   collected through &w.  NOTE(review): the sign tests, the final
   sign fix-up of the remainder and the return are elided in this
   listing.  */
698 UDItype
__udivmoddi4 ();
700 __moddi3 (DItype u
, DItype v
)
711 uu
.ll
= __negdi2 (uu
.ll
);
713 vv
.ll
= __negdi2 (vv
.ll
);
715 (void) __udivmoddi4 (uu
.ll
, vv
.ll
, &w
);
/* Unsigned DImode modulus: thin wrapper that discards the quotient
   and returns the remainder written through &w (return elided in
   this listing).  */
724 UDItype
__udivmoddi4 ();
726 __umoddi3 (UDItype u
, UDItype v
)
730 (void) __udivmoddi4 (u
, v
, &w
);
/* Unsigned DImode division: thin wrapper over __udivmoddi4 with a
   null remainder pointer.  */
737 UDItype
__udivmoddi4 ();
739 __udivdi3 (UDItype n
, UDItype d
)
741 return __udivmoddi4 (n
, d
, (UDItype
*) 0);
/* Signed DImode three-way compare: high words compared as signed,
   and only on a tie the low words compared as unsigned.
   NOTE(review): the return statements in each branch are elided in
   this listing; per the usual libgcc convention the result codes are
   0 = less, 1 = equal, 2 = greater — TODO confirm against the
   original libgcc2.c.  */
747 __cmpdi2 (DItype a
, DItype b
)
751 au
.ll
= a
, bu
.ll
= b
;
753 if (au
.s
.high
< bu
.s
.high
)
755 else if (au
.s
.high
> bu
.s
.high
)
757 if ((USItype
) au
.s
.low
< (USItype
) bu
.s
.low
)
759 else if ((USItype
) au
.s
.low
> (USItype
) bu
.s
.low
)
/* Unsigned DImode three-way compare: identical to __cmpdi2 except
   the high words are also compared as unsigned.  NOTE(review): the
   return statements are elided in this listing; presumably the same
   0/1/2 result codes as __cmpdi2 — TODO confirm.  */
767 __ucmpdi2 (DItype a
, DItype b
)
771 au
.ll
= a
, bu
.ll
= b
;
773 if ((USItype
) au
.s
.high
< (USItype
) bu
.s
.high
)
775 else if ((USItype
) au
.s
.high
> (USItype
) bu
.s
.high
)
777 if ((USItype
) au
.s
.low
< (USItype
) bu
.s
.low
)
779 else if ((USItype
) au
.s
.low
> (USItype
) bu
.s
.low
)
785 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
786 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
787 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
790 __fixunstfdi (TFtype a
)
798 /* Compute high word of result, as a flonum. */
799 b
= (a
/ HIGH_WORD_COEFF
);
800 /* Convert that to fixed (but not to DItype!),
801 and shift it into the high word. */
804 /* Remove high part from the TFtype, leaving the low part as flonum. */
806 /* Convert that to fixed (but not to DItype!) and add it in.
807 Sometimes A comes out negative. This is significant, since
808 A has more bits than a long int does. */
810 v
-= (USItype
) (- a
);
817 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
822 return - __fixunstfdi (-a
);
823 return __fixunstfdi (a
);
827 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
828 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
829 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
832 __fixunsxfdi (XFtype a
)
840 /* Compute high word of result, as a flonum. */
841 b
= (a
/ HIGH_WORD_COEFF
);
842 /* Convert that to fixed (but not to DItype!),
843 and shift it into the high word. */
846 /* Remove high part from the XFtype, leaving the low part as flonum. */
848 /* Convert that to fixed (but not to DItype!) and add it in.
849 Sometimes A comes out negative. This is significant, since
850 A has more bits than a long int does. */
852 v
-= (USItype
) (- a
);
859 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
864 return - __fixunsxfdi (-a
);
865 return __fixunsxfdi (a
);
870 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
871 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
874 __fixunsdfdi (DFtype a
)
882 /* Compute high word of result, as a flonum. */
883 b
= (a
/ HIGH_WORD_COEFF
);
884 /* Convert that to fixed (but not to DItype!),
885 and shift it into the high word. */
888 /* Remove high part from the DFtype, leaving the low part as flonum. */
890 /* Convert that to fixed (but not to DItype!) and add it in.
891 Sometimes A comes out negative. This is significant, since
892 A has more bits than a long int does. */
894 v
-= (USItype
) (- a
);
906 return - __fixunsdfdi (-a
);
907 return __fixunsdfdi (a
);
/* Convert SFtype to unsigned DImode: widen to DFtype (lossless for
   SFmode values), split at WORD_SIZE via division, convert each half
   to fixed-point separately.  The `v -= (USItype) (- a)' line handles
   the case where the residual low part comes out negative after the
   high word is subtracted.  NOTE(review): the lines from original
   952-953 at the end belong to the separate signed wrapper
   (presumably __fixsfdi); its header and the intervening #if/#endif
   structure are elided in this listing.  */
912 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
913 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
916 __fixunssfdi (SFtype original_a
)
918 /* Convert the SFtype to a DFtype, because that is surely not going
919 to lose any bits. Some day someone else can write a faster version
920 that avoids converting to DFtype, and verify it really works right. */
921 DFtype a
= original_a
;
928 /* Compute high word of result, as a flonum. */
929 b
= (a
/ HIGH_WORD_COEFF
);
930 /* Convert that to fixed (but not to DItype!),
931 and shift it into the high word. */
934 /* Remove high part from the DFtype, leaving the low part as flonum. */
936 /* Convert that to fixed (but not to DItype!) and add it in.
937 Sometimes A comes out negative. This is significant, since
938 A has more bits than a long int does. */
940 v
-= (USItype
) (- a
);
952 return - __fixunssfdi (-a
);
953 return __fixunssfdi (a
);
957 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
958 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
959 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
960 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
963 __floatdixf (DItype u
)
967 d
= (SItype
) (u
>> WORD_SIZE
);
968 d
*= HIGH_HALFWORD_COEFF
;
969 d
*= HIGH_HALFWORD_COEFF
;
970 d
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
976 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
977 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
978 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
979 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
982 __floatditf (DItype u
)
986 d
= (SItype
) (u
>> WORD_SIZE
);
987 d
*= HIGH_HALFWORD_COEFF
;
988 d
*= HIGH_HALFWORD_COEFF
;
989 d
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
/* Convert DImode to DFtype: take the (signed) high word, scale it by
   2^WORD_SIZE, then add the unsigned low word.  The scaling is done
   as two multiplications by HIGH_HALFWORD_COEFF — presumably to avoid
   depending on a full DImode-to-double constant conversion, which is
   what this very routine implements — TODO confirm rationale.
   NOTE(review): return type, the declaration of d and the return
   statement are elided in this listing.  */
996 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
997 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
998 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
1001 __floatdidf (DItype u
)
1005 d
= (SItype
) (u
>> WORD_SIZE
);
1006 d
*= HIGH_HALFWORD_COEFF
;
1007 d
*= HIGH_HALFWORD_COEFF
;
1008 d
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
1015 #define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
1016 #define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
1017 #define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
1018 #define DI_SIZE (sizeof (DItype) * BITS_PER_UNIT)
1020 /* Define codes for all the float formats that we know of. Note
1021 that this is copied from real.h. */
1023 #define UNKNOWN_FLOAT_FORMAT 0
1024 #define IEEE_FLOAT_FORMAT 1
1025 #define VAX_FLOAT_FORMAT 2
1026 #define IBM_FLOAT_FORMAT 3
1028 /* Default to IEEE float if not specified. Nearly all machines use it. */
1029 #ifndef HOST_FLOAT_FORMAT
1030 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1033 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1038 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1043 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1049 __floatdisf (DItype u
)
1051 /* Do the calculation in DFmode
1052 so that we don't lose any of the precision of the high word
1053 while multiplying it. */
1056 /* Protect against double-rounding error.
1057 Represent any low-order bits, that might be truncated in DFmode,
1058 by a bit that won't be lost. The bit can go in anywhere below the
1059 rounding position of the SFmode. A fixed mask and bit position
1060 handles all usual configurations. It doesn't handle the case
1061 of 128-bit DImode, however. */
1062 if (DF_SIZE
< DI_SIZE
1063 && DF_SIZE
> (DI_SIZE
- DF_SIZE
+ SF_SIZE
))
1065 #define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
1066 if (! (- ((DItype
) 1 << DF_SIZE
) < u
1067 && u
< ((DItype
) 1 << DF_SIZE
)))
1069 if ((USItype
) u
& (REP_BIT
- 1))
1073 f
= (SItype
) (u
>> WORD_SIZE
);
1074 f
*= HIGH_HALFWORD_COEFF
;
1075 f
*= HIGH_HALFWORD_COEFF
;
1076 f
+= (USItype
) (u
& (HIGH_WORD_COEFF
- 1));
1082 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
1083 /* Reenable the normal types, in case limits.h needs them. */
1096 __fixunsxfsi (XFtype a
)
1098 if (a
>= - (DFtype
) LONG_MIN
)
1099 return (SItype
) (a
+ LONG_MIN
) - LONG_MIN
;
1105 /* Reenable the normal types, in case limits.h needs them. */
1118 __fixunsdfsi (DFtype a
)
1120 if (a
>= - (DFtype
) LONG_MIN
)
1121 return (SItype
) (a
+ LONG_MIN
) - LONG_MIN
;
1127 /* Reenable the normal types, in case limits.h needs them. */
1140 __fixunssfsi (SFtype a
)
1142 if (a
>= - (SFtype
) LONG_MIN
)
1143 return (SItype
) (a
+ LONG_MIN
) - LONG_MIN
;
1148 /* From here on down, the routines use normal data types. */
1150 #define SItype bogus_type
1151 #define USItype bogus_type
1152 #define DItype bogus_type
1153 #define UDItype bogus_type
1154 #define SFtype bogus_type
1155 #define DFtype bogus_type
1167 /* Like bcmp except the sign is meaningful.
1168 Result is negative if S1 is less than S2,
1169 positive if S1 is greater, 0 if S1 and S2 are equal. */
/* NOTE(review): only the byte-fetch line of the loop body survives in
   this listing; the loop header over `size', the inequality test and
   the return statements are elided.  */
1172 __gcc_bcmp (unsigned char *s1
, unsigned char *s2
, size_t size
)
1176 unsigned char c1
= *s1
++, c2
= *s2
++;
1193 #if defined(__svr4__) || defined(__alliant__)
1197 /* The Alliant needs the added underscore. */
1198 asm (".globl __builtin_saveregs");
1199 asm ("__builtin_saveregs:");
1200 asm (".globl ___builtin_saveregs");
1201 asm ("___builtin_saveregs:");
1203 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1204 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1205 area and also for a new va_list
1207 /* Save all argument registers in the arg reg save area. The
1208 arg reg save area must have the following layout (according
1220 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1221 asm (" fst.q %f12,16(%sp)");
1223 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1224 asm (" st.l %r17,36(%sp)");
1225 asm (" st.l %r18,40(%sp)");
1226 asm (" st.l %r19,44(%sp)");
1227 asm (" st.l %r20,48(%sp)");
1228 asm (" st.l %r21,52(%sp)");
1229 asm (" st.l %r22,56(%sp)");
1230 asm (" st.l %r23,60(%sp)");
1231 asm (" st.l %r24,64(%sp)");
1232 asm (" st.l %r25,68(%sp)");
1233 asm (" st.l %r26,72(%sp)");
1234 asm (" st.l %r27,76(%sp)");
1236 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1237 va_list structure. Put it into
1238 r16 so that it will be returned
1241 /* Initialize all fields of the new va_list structure. This
1242 structure looks like:
1245 unsigned long ireg_used;
1246 unsigned long freg_used;
1252 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1253 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1254 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1255 asm (" bri %r1"); /* delayed return */
1256 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1258 #else /* not __svr4__ */
1259 #if defined(__PARAGON__)
1261 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1262 * and we stand a better chance of hooking into libraries
1263 * compiled by PGI. [andyp@ssd.intel.com]
1267 asm (".globl __builtin_saveregs");
1268 asm ("__builtin_saveregs:");
1269 asm (".globl ___builtin_saveregs");
1270 asm ("___builtin_saveregs:");
1272 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1273 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1274 area and also for a new va_list
1276 /* Save all argument registers in the arg reg save area. The
1277 arg reg save area must have the following layout (according
1289 asm (" fst.q f8, 0(sp)");
1290 asm (" fst.q f12,16(sp)");
1291 asm (" st.l r16,32(sp)");
1292 asm (" st.l r17,36(sp)");
1293 asm (" st.l r18,40(sp)");
1294 asm (" st.l r19,44(sp)");
1295 asm (" st.l r20,48(sp)");
1296 asm (" st.l r21,52(sp)");
1297 asm (" st.l r22,56(sp)");
1298 asm (" st.l r23,60(sp)");
1299 asm (" st.l r24,64(sp)");
1300 asm (" st.l r25,68(sp)");
1301 asm (" st.l r26,72(sp)");
1302 asm (" st.l r27,76(sp)");
1304 asm (" adds 80,sp,r16"); /* compute the address of the new
1305 va_list structure. Put it into
1306 r16 so that it will be returned
1309 /* Initialize all fields of the new va_list structure. This
1310 structure looks like:
1313 unsigned long ireg_used;
1314 unsigned long freg_used;
1320 asm (" st.l r0, 0(r16)"); /* nfixed */
1321 asm (" st.l r0, 4(r16)"); /* nfloating */
1322 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1323 asm (" bri r1"); /* delayed return */
1324 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1325 #else /* not __PARAGON__ */
1329 asm (".globl ___builtin_saveregs");
1330 asm ("___builtin_saveregs:");
1331 asm (" mov sp,r30");
1332 asm (" andnot 0x0f,sp,sp");
1333 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1335 /* Fill in the __va_struct. */
1336 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1337 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1338 asm (" st.l r18, 8(sp)");
1339 asm (" st.l r19,12(sp)");
1340 asm (" st.l r20,16(sp)");
1341 asm (" st.l r21,20(sp)");
1342 asm (" st.l r22,24(sp)");
1343 asm (" st.l r23,28(sp)");
1344 asm (" st.l r24,32(sp)");
1345 asm (" st.l r25,36(sp)");
1346 asm (" st.l r26,40(sp)");
1347 asm (" st.l r27,44(sp)");
1349 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1350 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1352 /* Fill in the __va_ctl. */
1353 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1354 asm (" st.l r28,84(sp)"); /* pointer to more args */
1355 asm (" st.l r0, 88(sp)"); /* nfixed */
1356 asm (" st.l r0, 92(sp)"); /* nfloating */
1358 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1360 asm (" mov r30,sp");
1361 /* recover stack and pass address to start
1363 #endif /* not __PARAGON__ */
1364 #endif /* not __svr4__ */
1365 #else /* not __i860__ */
1367 asm (".global __builtin_saveregs");
1368 asm ("__builtin_saveregs:");
1369 asm (".global ___builtin_saveregs");
1370 asm ("___builtin_saveregs:");
1371 #ifdef NEED_PROC_COMMAND
1374 asm ("st %i0,[%fp+68]");
1375 asm ("st %i1,[%fp+72]");
1376 asm ("st %i2,[%fp+76]");
1377 asm ("st %i3,[%fp+80]");
1378 asm ("st %i4,[%fp+84]");
1380 asm ("st %i5,[%fp+88]");
1381 #ifdef NEED_TYPE_COMMAND
1382 asm (".type __builtin_saveregs,#function");
1383 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1385 #else /* not __sparc__ */
1386 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1390 asm (" .set nomips16");
1392 asm (" .ent __builtin_saveregs");
1393 asm (" .globl __builtin_saveregs");
1394 asm ("__builtin_saveregs:");
1395 asm (" sw $4,0($30)");
1396 asm (" sw $5,4($30)");
1397 asm (" sw $6,8($30)");
1398 asm (" sw $7,12($30)");
1400 asm (" .end __builtin_saveregs");
1401 #else /* not __mips__, etc. */
1404 __builtin_saveregs ()
1409 #endif /* not __mips__ */
1410 #endif /* not __sparc__ */
1411 #endif /* not __i860__ */
/* Assertion-failure reporter called by the `assert' macro.  `string'
   is the compiler/libc-supplied format (so the non-literal fprintf
   format here is by design, not a format-string hole).  Declared
   noreturn; the abort()/fflush lines that presumably follow the
   fprintf are elided in this listing — TODO confirm.  */
1415 #ifndef inhibit_libc
1417 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1419 /* This is used by the `assert' macro. */
1420 extern void __eprintf (const char *, const char *, unsigned int, const char *)
1421 __attribute__ ((__noreturn__
));
1424 __eprintf (const char *string
, const char *expression
,
1425 unsigned int line
, const char *filename
)
1427 fprintf (stderr
, string
, expression
, line
, filename
);
1437 /* Structure emitted by -a */
1441 const char *filename
;
1445 const unsigned long *addresses
;
1447 /* Older GCC's did not emit these fields. */
1449 const char **functions
;
1450 const long *line_nums
;
1451 const char **filenames
;
1455 #ifdef BLOCK_PROFILER_CODE
1458 #ifndef inhibit_libc
1460 /* Simple minded basic block profiling output dumper for
1461 systems that don't provide tcov support. At present,
1462 it requires atexit and stdio. */
1464 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1468 #include "gbl-ctors.h"
1469 #include "gcov-io.h"
1472 static struct bb
*bb_head
;
1474 static int num_digits (long value
, int base
) __attribute__ ((const));
1476 /* Return the number of digits needed to print a value */
1477 /* __inline__ */ static int num_digits (long value
, int base
)
1479 int minus
= (value
< 0 && base
!= 16);
1480 unsigned long v
= (minus
) ? -value
: value
;
1494 __bb_exit_func (void)
1496 FILE *da_file
, *file
;
1503 i
= strlen (bb_head
->filename
) - 3;
1505 if (!strcmp (bb_head
->filename
+i
, ".da"))
1507 /* Must be -fprofile-arcs not -a.
1508 Dump data in a form that gcov expects. */
1512 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1514 /* If the file exists, and the number of counts in it is the same,
1515 then merge them in. */
1517 if ((da_file
= fopen (ptr
->filename
, "r")) != 0)
1521 if (__read_long (&n_counts
, da_file
, 8) != 0)
1523 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1528 if (n_counts
== ptr
->ncounts
)
1532 for (i
= 0; i
< n_counts
; i
++)
1536 if (__read_long (&v
, da_file
, 8) != 0)
1538 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1542 ptr
->counts
[i
] += v
;
1546 if (fclose (da_file
) == EOF
)
1547 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1550 if ((da_file
= fopen (ptr
->filename
, "w")) == 0)
1552 fprintf (stderr
, "arc profiling: Can't open output file %s.\n",
1557 /* ??? Should first write a header to the file. Preferably, a 4 byte
1558 magic number, 4 bytes containing the time the program was
1559 compiled, 4 bytes containing the last modification time of the
1560 source file, and 4 bytes indicating the compiler options used.
1562 That way we can easily verify that the proper source/executable/
1563 data file combination is being used from gcov. */
1565 if (__write_long (ptr
->ncounts
, da_file
, 8) != 0)
1568 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1574 long *count_ptr
= ptr
->counts
;
1576 for (j
= ptr
->ncounts
; j
> 0; j
--)
1578 if (__write_long (*count_ptr
, da_file
, 8) != 0)
1586 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1590 if (fclose (da_file
) == EOF
)
1591 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1598 /* Must be basic block profiling. Emit a human readable output file. */
1600 file
= fopen ("bb.out", "a");
1609 /* This is somewhat type incorrect, but it avoids worrying about
1610 exactly where time.h is included from. It should be ok unless
1611 a void * differs from other pointer formats, or if sizeof (long)
1612 is < sizeof (time_t). It would be nice if we could assume the
1613 use of rationale standards here. */
1615 time ((void *) &time_value
);
1616 fprintf (file
, "Basic block profiling finished on %s\n", ctime ((void *) &time_value
));
1618 /* We check the length field explicitly in order to allow compatibility
1619 with older GCC's which did not provide it. */
1621 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1624 int func_p
= (ptr
->nwords
>= sizeof (struct bb
)
1625 && ptr
->nwords
<= 1000
1627 int line_p
= (func_p
&& ptr
->line_nums
);
1628 int file_p
= (func_p
&& ptr
->filenames
);
1629 int addr_p
= (ptr
->addresses
!= 0);
1630 long ncounts
= ptr
->ncounts
;
1636 int blk_len
= num_digits (ncounts
, 10);
1641 fprintf (file
, "File %s, %ld basic blocks \n\n",
1642 ptr
->filename
, ncounts
);
1644 /* Get max values for each field. */
1645 for (i
= 0; i
< ncounts
; i
++)
1650 if (cnt_max
< ptr
->counts
[i
])
1651 cnt_max
= ptr
->counts
[i
];
1653 if (addr_p
&& addr_max
< ptr
->addresses
[i
])
1654 addr_max
= ptr
->addresses
[i
];
1656 if (line_p
&& line_max
< ptr
->line_nums
[i
])
1657 line_max
= ptr
->line_nums
[i
];
1661 p
= (ptr
->functions
[i
]) ? (ptr
->functions
[i
]) : "<none>";
1669 p
= (ptr
->filenames
[i
]) ? (ptr
->filenames
[i
]) : "<none>";
1676 addr_len
= num_digits (addr_max
, 16);
1677 cnt_len
= num_digits (cnt_max
, 10);
1678 line_len
= num_digits (line_max
, 10);
1680 /* Now print out the basic block information. */
1681 for (i
= 0; i
< ncounts
; i
++)
1684 " Block #%*d: executed %*ld time(s)",
1686 cnt_len
, ptr
->counts
[i
]);
1689 fprintf (file
, " address= 0x%.*lx", addr_len
,
1693 fprintf (file
, " function= %-*s", func_len
,
1694 (ptr
->functions
[i
]) ? ptr
->functions
[i
] : "<none>");
1697 fprintf (file
, " line= %*ld", line_len
, ptr
->line_nums
[i
]);
1700 fprintf (file
, " file= %s",
1701 (ptr
->filenames
[i
]) ? ptr
->filenames
[i
] : "<none>");
1703 fprintf (file
, "\n");
1706 fprintf (file
, "\n");
1710 fprintf (file
, "\n\n");
1716 __bb_init_func (struct bb
*blocks
)
1718 /* User is supposed to check whether the first word is non-0,
1719 but just in case.... */
1721 if (blocks
->zero_word
)
1724 /* Initialize destructor. */
1726 atexit (__bb_exit_func
);
1728 /* Set up linked list. */
1729 blocks
->zero_word
= 1;
1730 blocks
->next
= bb_head
;
1734 #ifndef MACHINE_STATE_SAVE
1735 #define MACHINE_STATE_SAVE(ID)
1737 #ifndef MACHINE_STATE_RESTORE
1738 #define MACHINE_STATE_RESTORE(ID)
1741 /* Number of buckets in hashtable of basic block addresses. */
1743 #define BB_BUCKETS 311
1745 /* Maximum length of string in file bb.in. */
1747 #define BBINBUFSIZE 500
1749 /* BBINBUFSIZE-1 with double quotes. We could use #BBINBUFSIZE or
1750 "BBINBUFSIZE" but want to avoid trouble with preprocessors. */
1752 #define BBINBUFSIZESTR "499"
1756 struct bb_edge
*next
;
1757 unsigned long src_addr
;
1758 unsigned long dst_addr
;
1759 unsigned long count
;
1764 TRACE_KEEP
= 0, TRACE_ON
= 1, TRACE_OFF
= 2
1769 struct bb_func
*next
;
1772 enum bb_func_mode mode
;
1775 /* This is the connection to the outside world.
1776 The BLOCK_PROFILER macro must set __bb.blocks
1777 and __bb.blockno. */
1780 unsigned long blockno
;
1784 /* Vars to store addrs of source and destination basic blocks
1787 static unsigned long bb_src
= 0;
1788 static unsigned long bb_dst
= 0;
1790 static FILE *bb_tracefile
= (FILE *) 0;
1791 static struct bb_edge
**bb_hashbuckets
= (struct bb_edge
**) 0;
1792 static struct bb_func
*bb_func_head
= (struct bb_func
*) 0;
1793 static unsigned long bb_callcount
= 0;
1794 static int bb_mode
= 0;
1796 static unsigned long *bb_stack
= (unsigned long *) 0;
1797 static size_t bb_stacksize
= 0;
1799 static int reported
= 0;
1802 Always : Print execution frequencies of basic blocks
1804 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1805 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1806 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1807 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1812 /*#include <sys/types.h>*/
1813 #include <sys/stat.h>
1814 /*#include <malloc.h>*/
1816 /* Commands executed by gopen. */
1818 #define GOPENDECOMPRESS "gzip -cd "
1819 #define GOPENCOMPRESS "gzip -c >"
1821 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1822 If it does not compile, simply replace gopen by fopen and delete
1823 '.gz' from any first parameter to gopen. */
1826 gopen (char *fn
, char *mode
)
1834 if (mode
[0] != 'r' && mode
[0] != 'w')
1837 p
= fn
+ strlen (fn
)-1;
1838 use_gzip
= ((p
[-1] == '.' && (p
[0] == 'Z' || p
[0] == 'z'))
1839 || (p
[-2] == '.' && p
[-1] == 'g' && p
[0] == 'z'));
1846 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1847 + sizeof (GOPENDECOMPRESS
));
1848 strcpy (s
, GOPENDECOMPRESS
);
1849 strcpy (s
+ (sizeof (GOPENDECOMPRESS
)-1), fn
);
1850 f
= popen (s
, mode
);
1858 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1859 + sizeof (GOPENCOMPRESS
));
1860 strcpy (s
, GOPENCOMPRESS
);
1861 strcpy (s
+ (sizeof (GOPENCOMPRESS
)-1), fn
);
1862 if (!(f
= popen (s
, mode
)))
1863 f
= fopen (s
, mode
);
1870 return fopen (fn
, mode
);
1880 if (!fstat (fileno (f
), &buf
) && S_ISFIFO (buf
.st_mode
))
1888 #endif /* HAVE_POPEN */
1890 /* Called once per program. */
1893 __bb_exit_trace_func ()
1895 FILE *file
= fopen ("bb.out", "a");
1908 gclose (bb_tracefile
);
1910 fclose (bb_tracefile
);
1911 #endif /* HAVE_POPEN */
1914 /* Check functions in `bb.in'. */
1919 const struct bb_func
*p
;
1920 int printed_something
= 0;
1924 /* This is somewhat type incorrect. */
1925 time ((void *) &time_value
);
1927 for (p
= bb_func_head
; p
!= (struct bb_func
*) 0; p
= p
->next
)
1929 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1931 if (!ptr
->filename
|| (p
->filename
!= (char *) 0 && strcmp (p
->filename
, ptr
->filename
)))
1933 for (blk
= 0; blk
< ptr
->ncounts
; blk
++)
1935 if (!strcmp (p
->funcname
, ptr
->functions
[blk
]))
1940 if (!printed_something
)
1942 fprintf (file
, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value
));
1943 printed_something
= 1;
1946 fprintf (file
, "\tFunction %s", p
->funcname
);
1948 fprintf (file
, " of file %s", p
->filename
);
1949 fprintf (file
, "\n" );
1954 if (printed_something
)
1955 fprintf (file
, "\n");
1961 if (!bb_hashbuckets
)
1965 fprintf (stderr
, "Profiler: out of memory\n");
1975 unsigned long addr_max
= 0;
1976 unsigned long cnt_max
= 0;
1980 /* This is somewhat type incorrect, but it avoids worrying about
1981 exactly where time.h is included from. It should be ok unless
1982 a void * differs from other pointer formats, or if sizeof (long)
1983 is < sizeof (time_t). It would be nice if we could assume the
1984 use of rationale standards here. */
1986 time ((void *) &time_value
);
1987 fprintf (file
, "Basic block jump tracing");
1989 switch (bb_mode
& 12)
1992 fprintf (file
, " (with call)");
1996 /* Print nothing. */
2000 fprintf (file
, " (with call & ret)");
2004 fprintf (file
, " (with ret)");
2008 fprintf (file
, " finished on %s\n", ctime ((void *) &time_value
));
2010 for (i
= 0; i
< BB_BUCKETS
; i
++)
2012 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
2013 for ( ; bucket
; bucket
= bucket
->next
)
2015 if (addr_max
< bucket
->src_addr
)
2016 addr_max
= bucket
->src_addr
;
2017 if (addr_max
< bucket
->dst_addr
)
2018 addr_max
= bucket
->dst_addr
;
2019 if (cnt_max
< bucket
->count
)
2020 cnt_max
= bucket
->count
;
2023 addr_len
= num_digits (addr_max
, 16);
2024 cnt_len
= num_digits (cnt_max
, 10);
2026 for ( i
= 0; i
< BB_BUCKETS
; i
++)
2028 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
2029 for ( ; bucket
; bucket
= bucket
->next
)
2031 fprintf (file
, "Jump from block 0x%.*lx to "
2032 "block 0x%.*lx executed %*lu time(s)\n",
2033 addr_len
, bucket
->src_addr
,
2034 addr_len
, bucket
->dst_addr
,
2035 cnt_len
, bucket
->count
);
2039 fprintf (file
, "\n");
2047 /* Free allocated memory. */
2052 struct bb_func
*old
= f
;
2055 if (old
->funcname
) free (old
->funcname
);
2056 if (old
->filename
) free (old
->filename
);
2067 for (i
= 0; i
< BB_BUCKETS
; i
++)
2069 struct bb_edge
*old
, *bucket
= bb_hashbuckets
[i
];
2074 bucket
= bucket
->next
;
2078 free (bb_hashbuckets
);
2081 for (b
= bb_head
; b
; b
= b
->next
)
2082 if (b
->flags
) free (b
->flags
);
2085 /* Called once per program. */
2091 char buf
[BBINBUFSIZE
];
2094 enum bb_func_mode m
;
2097 /* Initialize destructor. */
2098 atexit (__bb_exit_func
);
2100 if (!(file
= fopen ("bb.in", "r")))
2103 while(fscanf (file
, " %" BBINBUFSIZESTR
"s ", buf
) != EOF
)
2115 if (!strcmp (p
, "__bb_trace__"))
2117 else if (!strcmp (p
, "__bb_jumps__"))
2119 else if (!strcmp (p
, "__bb_hidecall__"))
2121 else if (!strcmp (p
, "__bb_showret__"))
2125 struct bb_func
*f
= (struct bb_func
*) malloc (sizeof (struct bb_func
));
2129 f
->next
= bb_func_head
;
2130 if ((pos
= strchr (p
, ':')))
2132 if (!(f
->funcname
= (char *) malloc (strlen (pos
+1)+1)))
2134 strcpy (f
->funcname
, pos
+1);
2136 if ((f
->filename
= (char *) malloc (l
+1)))
2138 strncpy (f
->filename
, p
, l
);
2139 f
->filename
[l
] = '\0';
2142 f
->filename
= (char *) 0;
2146 if (!(f
->funcname
= (char *) malloc (strlen (p
)+1)))
2148 strcpy (f
->funcname
, p
);
2149 f
->filename
= (char *) 0;
2161 bb_tracefile
= gopen ("bbtrace.gz", "w");
2166 bb_tracefile
= fopen ("bbtrace", "w");
2168 #endif /* HAVE_POPEN */
2172 bb_hashbuckets
= (struct bb_edge
**)
2173 malloc (BB_BUCKETS
* sizeof (struct bb_edge
*));
2175 /* Use a loop here rather than calling bzero to avoid having to
2176 conditionalize its existance. */
2177 for (i
= 0; i
< BB_BUCKETS
; i
++)
2178 bb_hashbuckets
[i
] = 0;
2184 bb_stack
= (unsigned long *) malloc (bb_stacksize
* sizeof (*bb_stack
));
2187 /* Initialize destructor. */
2188 atexit (__bb_exit_trace_func
);
2191 /* Called upon entering a basic block. */
2196 struct bb_edge
*bucket
;
2198 MACHINE_STATE_SAVE("1")
2200 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2203 bb_dst
= __bb
.blocks
->addresses
[__bb
.blockno
];
2204 __bb
.blocks
->counts
[__bb
.blockno
]++;
2208 fwrite (&bb_dst
, sizeof (unsigned long), 1, bb_tracefile
);
2213 struct bb_edge
**startbucket
, **oldnext
;
2215 oldnext
= startbucket
2216 = & bb_hashbuckets
[ (((int) bb_src
*8) ^ (int) bb_dst
) % BB_BUCKETS
];
2217 bucket
= *startbucket
;
2219 for (bucket
= *startbucket
; bucket
;
2220 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2222 if (bucket
->src_addr
== bb_src
2223 && bucket
->dst_addr
== bb_dst
)
2226 *oldnext
= bucket
->next
;
2227 bucket
->next
= *startbucket
;
2228 *startbucket
= bucket
;
2233 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2239 fprintf (stderr
, "Profiler: out of memory\n");
2246 bucket
->src_addr
= bb_src
;
2247 bucket
->dst_addr
= bb_dst
;
2248 bucket
->next
= *startbucket
;
2249 *startbucket
= bucket
;
2260 MACHINE_STATE_RESTORE("1")
2264 /* Called when returning from a function and `__bb_showret__' is set. */
2267 __bb_trace_func_ret ()
2269 struct bb_edge
*bucket
;
2271 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2276 struct bb_edge
**startbucket
, **oldnext
;
2278 oldnext
= startbucket
2279 = & bb_hashbuckets
[ (((int) bb_dst
* 8) ^ (int) bb_src
) % BB_BUCKETS
];
2280 bucket
= *startbucket
;
2282 for (bucket
= *startbucket
; bucket
;
2283 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2285 if (bucket
->src_addr
== bb_dst
2286 && bucket
->dst_addr
== bb_src
)
2289 *oldnext
= bucket
->next
;
2290 bucket
->next
= *startbucket
;
2291 *startbucket
= bucket
;
2296 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2302 fprintf (stderr
, "Profiler: out of memory\n");
2309 bucket
->src_addr
= bb_dst
;
2310 bucket
->dst_addr
= bb_src
;
2311 bucket
->next
= *startbucket
;
2312 *startbucket
= bucket
;
2325 /* Called upon entering the first function of a file. */
2328 __bb_init_file (struct bb
*blocks
)
2331 const struct bb_func
*p
;
2332 long blk
, ncounts
= blocks
->ncounts
;
2333 const char **functions
= blocks
->functions
;
2335 /* Set up linked list. */
2336 blocks
->zero_word
= 1;
2337 blocks
->next
= bb_head
;
2342 || !(blocks
->flags
= (char *) malloc (sizeof (char) * blocks
->ncounts
)))
2345 for (blk
= 0; blk
< ncounts
; blk
++)
2346 blocks
->flags
[blk
] = 0;
2348 for (blk
= 0; blk
< ncounts
; blk
++)
2350 for (p
= bb_func_head
; p
; p
= p
->next
)
2352 if (!strcmp (p
->funcname
, functions
[blk
])
2353 && (!p
->filename
|| !strcmp (p
->filename
, blocks
->filename
)))
2355 blocks
->flags
[blk
] |= p
->mode
;
2362 /* Called when exiting from a function. */
2368 MACHINE_STATE_SAVE("2")
2372 if ((bb_mode
& 12) && bb_stacksize
> bb_callcount
)
2374 bb_src
= bb_stack
[bb_callcount
];
2376 __bb_trace_func_ret ();
2382 MACHINE_STATE_RESTORE("2")
2386 /* Called when entering a function. */
2389 __bb_init_trace_func (struct bb
*blocks
, unsigned long blockno
)
2391 static int trace_init
= 0;
2393 MACHINE_STATE_SAVE("3")
2395 if (!blocks
->zero_word
)
2402 __bb_init_file (blocks
);
2412 if (bb_callcount
>= bb_stacksize
)
2414 size_t newsize
= bb_callcount
+ 100;
2416 bb_stack
= (unsigned long *) realloc (bb_stack
, newsize
);
2421 fprintf (stderr
, "Profiler: out of memory\n");
2425 goto stack_overflow
;
2427 bb_stacksize
= newsize
;
2429 bb_stack
[bb_callcount
] = bb_src
;
2440 else if (blocks
->flags
&& (blocks
->flags
[blockno
] & TRACE_ON
))
2446 bb_stack
[bb_callcount
] = bb_src
;
2449 MACHINE_STATE_RESTORE("3")
2452 #endif /* not inhibit_libc */
2453 #endif /* not BLOCK_PROFILER_CODE */
2457 unsigned int __shtab
[] = {
2458 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2459 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2460 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2461 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2462 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2463 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2464 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2465 0x10000000, 0x20000000, 0x40000000, 0x80000000
2469 #ifdef L_clear_cache
2470 /* Clear part of an instruction cache. */
2472 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2475 __clear_cache (char *beg
__attribute__((__unused__
)),
2476 char *end
__attribute__((__unused__
)))
2478 #ifdef CLEAR_INSN_CACHE
2479 CLEAR_INSN_CACHE (beg
, end
);
2481 #ifdef INSN_CACHE_SIZE
2482 static char array
[INSN_CACHE_SIZE
+ INSN_CACHE_PLANE_SIZE
+ INSN_CACHE_LINE_WIDTH
];
2483 static int initialized
;
2487 typedef (*function_ptr
) ();
2489 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2490 /* It's cheaper to clear the whole cache.
2491 Put in a series of jump instructions so that calling the beginning
2492 of the cache will clear the whole thing. */
2496 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2497 & -INSN_CACHE_LINE_WIDTH
);
2498 int end_ptr
= ptr
+ INSN_CACHE_SIZE
;
2500 while (ptr
< end_ptr
)
2502 *(INSTRUCTION_TYPE
*)ptr
2503 = JUMP_AHEAD_INSTRUCTION
+ INSN_CACHE_LINE_WIDTH
;
2504 ptr
+= INSN_CACHE_LINE_WIDTH
;
2506 *(INSTRUCTION_TYPE
*) (ptr
- INSN_CACHE_LINE_WIDTH
) = RETURN_INSTRUCTION
;
2511 /* Call the beginning of the sequence. */
2512 (((function_ptr
) (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2513 & -INSN_CACHE_LINE_WIDTH
))
2516 #else /* Cache is large. */
2520 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2521 & -INSN_CACHE_LINE_WIDTH
);
2523 while (ptr
< (int) array
+ sizeof array
)
2525 *(INSTRUCTION_TYPE
*)ptr
= RETURN_INSTRUCTION
;
2526 ptr
+= INSN_CACHE_LINE_WIDTH
;
2532 /* Find the location in array that occupies the same cache line as BEG. */
2534 offset
= ((int) beg
& -INSN_CACHE_LINE_WIDTH
) & (INSN_CACHE_PLANE_SIZE
- 1);
2535 start_addr
= (((int) (array
+ INSN_CACHE_PLANE_SIZE
- 1)
2536 & -INSN_CACHE_PLANE_SIZE
)
2539 /* Compute the cache alignment of the place to stop clearing. */
2540 #if 0 /* This is not needed for gcc's purposes. */
2541 /* If the block to clear is bigger than a cache plane,
2542 we clear the entire cache, and OFFSET is already correct. */
2543 if (end
< beg
+ INSN_CACHE_PLANE_SIZE
)
2545 offset
= (((int) (end
+ INSN_CACHE_LINE_WIDTH
- 1)
2546 & -INSN_CACHE_LINE_WIDTH
)
2547 & (INSN_CACHE_PLANE_SIZE
- 1));
2549 #if INSN_CACHE_DEPTH > 1
2550 end_addr
= (start_addr
& -INSN_CACHE_PLANE_SIZE
) + offset
;
2551 if (end_addr
<= start_addr
)
2552 end_addr
+= INSN_CACHE_PLANE_SIZE
;
2554 for (plane
= 0; plane
< INSN_CACHE_DEPTH
; plane
++)
2556 int addr
= start_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2557 int stop
= end_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2559 while (addr
!= stop
)
2561 /* Call the return instruction at ADDR. */
2562 ((function_ptr
) addr
) ();
2564 addr
+= INSN_CACHE_LINE_WIDTH
;
2567 #else /* just one plane */
2570 /* Call the return instruction at START_ADDR. */
2571 ((function_ptr
) start_addr
) ();
2573 start_addr
+= INSN_CACHE_LINE_WIDTH
;
2575 while ((start_addr
% INSN_CACHE_SIZE
) != offset
);
2576 #endif /* just one plane */
2577 #endif /* Cache is large */
2578 #endif /* Cache exists */
2579 #endif /* CLEAR_INSN_CACHE */
2582 #endif /* L_clear_cache */
2586 /* Jump to a trampoline, loading the static chain address. */
2588 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2600 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2604 mprotect (char *addr
, int len
, int prot
)
2621 if (VirtualProtect (addr
, len
, np
, &op
))
2627 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2629 #ifdef TRANSFER_FROM_TRAMPOLINE
2630 TRANSFER_FROM_TRAMPOLINE
2633 #if defined (NeXT) && defined (__MACH__)
2635 /* Make stack executable so we can call trampolines on stack.
2636 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2640 #include <mach/mach.h>
2644 __enable_execute_stack (char *addr
)
2647 char *eaddr
= addr
+ TRAMPOLINE_SIZE
;
2648 vm_address_t a
= (vm_address_t
) addr
;
2650 /* turn on execute access on stack */
2651 r
= vm_protect (task_self (), a
, TRAMPOLINE_SIZE
, FALSE
, VM_PROT_ALL
);
2652 if (r
!= KERN_SUCCESS
)
2654 mach_error("vm_protect VM_PROT_ALL", r
);
2658 /* We inline the i-cache invalidation for speed */
2660 #ifdef CLEAR_INSN_CACHE
2661 CLEAR_INSN_CACHE (addr
, eaddr
);
2663 __clear_cache ((int) addr
, (int) eaddr
);
2667 #endif /* defined (NeXT) && defined (__MACH__) */
2671 /* Make stack executable so we can call trampolines on stack.
2672 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2674 #include <sys/mman.h>
2675 #include <sys/vmparam.h>
2676 #include <machine/machparam.h>
2679 __enable_execute_stack ()
2682 static unsigned lowest
= USRSTACK
;
2683 unsigned current
= (unsigned) &fp
& -NBPG
;
2685 if (lowest
> current
)
2687 unsigned len
= lowest
- current
;
2688 mremap (current
, &len
, PROT_READ
| PROT_WRITE
| PROT_EXEC
, MAP_PRIVATE
);
2692 /* Clear instruction cache in case an old trampoline is in it. */
2695 #endif /* __convex__ */
2699 /* Modified from the convex -code above. */
2701 #include <sys/param.h>
2703 #include <sys/m88kbcs.h>
2706 __enable_execute_stack ()
2709 static unsigned long lowest
= USRSTACK
;
2710 unsigned long current
= (unsigned long) &save_errno
& -NBPC
;
2712 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2713 address is seen as 'negative'. That is the case with the stack. */
2716 if (lowest
> current
)
2718 unsigned len
=lowest
-current
;
2719 memctl(current
,len
,MCT_TEXT
);
2723 memctl(current
,NBPC
,MCT_TEXT
);
2727 #endif /* __sysV88__ */
2731 #include <sys/signal.h>
2734 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2735 so define it here, because we need it in __clear_insn_cache below */
2736 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2737 hence we enable this stuff only if MCT_TEXT is #define'd. */
2752 /* Clear instruction cache so we can call trampolines on stack.
2753 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2756 __clear_insn_cache ()
2761 /* Preserve errno, because users would be surprised to have
2762 errno changing without explicitly calling any system-call. */
2765 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2766 No need to use an address derived from _start or %sp, as 0 works also. */
2767 memctl(0, 4096, MCT_TEXT
);
2772 #endif /* __sysV68__ */
2776 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2778 #include <sys/mman.h>
2779 #include <sys/types.h>
2780 #include <sys/param.h>
2781 #include <sys/vmmac.h>
2783 /* Modified from the convex -code above.
2784 mremap promises to clear the i-cache. */
2787 __enable_execute_stack ()
2790 if (mprotect (((unsigned int)&fp
/PAGSIZ
)*PAGSIZ
, PAGSIZ
,
2791 PROT_READ
|PROT_WRITE
|PROT_EXEC
))
2793 perror ("mprotect in __enable_execute_stack");
2798 #endif /* __pyr__ */
2800 #if defined (sony_news) && defined (SYSTYPE_BSD)
2803 #include <sys/types.h>
2804 #include <sys/param.h>
2805 #include <syscall.h>
2806 #include <machine/sysnews.h>
2808 /* cacheflush function for NEWS-OS 4.2.
2809 This function is called from trampoline-initialize code
2810 defined in config/mips/mips.h. */
2813 cacheflush (char *beg
, int size
, int flag
)
2815 if (syscall (SYS_sysnews
, NEWS_CACHEFLUSH
, beg
, size
, FLUSH_BCACHE
))
2817 perror ("cache_flush");
2823 #endif /* sony_news */
2824 #endif /* L_trampoline */
2829 #include "gbl-ctors.h"
2830 /* Some systems use __main in a way incompatible with its use in gcc, in these
2831 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2832 give the same symbol without quotes for an alternative entry point. You
2833 must define both, or neither. */
2835 #define NAME__MAIN "__main"
2836 #define SYMBOL__MAIN __main
2839 #ifdef INIT_SECTION_ASM_OP
2840 #undef HAS_INIT_SECTION
2841 #define HAS_INIT_SECTION
2844 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2846 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2847 code to run constructors. In that case, we need to handle EH here, too. */
2849 #ifdef EH_FRAME_SECTION
2851 extern unsigned char __EH_FRAME_BEGIN__
[];
2854 /* Run all the global destructors on exit from the program. */
2857 __do_global_dtors ()
2859 #ifdef DO_GLOBAL_DTORS_BODY
2860 DO_GLOBAL_DTORS_BODY
;
2862 static func_ptr
*p
= __DTOR_LIST__
+ 1;
2869 #ifdef EH_FRAME_SECTION
2871 static int completed
= 0;
2875 __deregister_frame_info (__EH_FRAME_BEGIN__
);
2882 #ifndef HAS_INIT_SECTION
2883 /* Run all the global constructors on entry to the program. */
2886 __do_global_ctors ()
2888 #ifdef EH_FRAME_SECTION
2890 static struct object object
;
2891 __register_frame_info (__EH_FRAME_BEGIN__
, &object
);
2894 DO_GLOBAL_CTORS_BODY
;
2895 atexit (__do_global_dtors
);
2897 #endif /* no HAS_INIT_SECTION */
2899 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2900 /* Subroutine called automatically by `main'.
2901 Compiling a global function named `main'
2902 produces an automatic call to this function at the beginning.
2904 For many systems, this routine calls __do_global_ctors.
2905 For systems which support a .init section we use the .init section
2906 to run __do_global_ctors, so we need not do anything here. */
2911 /* Support recursive calls to `main': run initializers just once. */
2912 static int initialized
;
2916 __do_global_ctors ();
2919 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2921 #endif /* L__main */
2922 #endif /* __CYGWIN__ */
2926 #include "gbl-ctors.h"
2928 /* Provide default definitions for the lists of constructors and
2929 destructors, so that we don't get linker errors. These symbols are
2930 intentionally bss symbols, so that gld and/or collect will provide
2931 the right values. */
2933 /* We declare the lists here with two elements each,
2934 so that they are valid empty lists if no other definition is loaded.
2936 If we are using the old "set" extensions to have the gnu linker
2937 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
2938 must be in the bss/common section.
2940 Long term no port should use those extensions. But many still do. */
2941 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2942 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
2943 func_ptr __CTOR_LIST__
[2] = {0, 0};
2944 func_ptr __DTOR_LIST__
[2] = {0, 0};
2946 func_ptr __CTOR_LIST__
[2];
2947 func_ptr __DTOR_LIST__
[2];
2949 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2950 #endif /* L_ctors */
2954 #include "gbl-ctors.h"
2962 static func_ptr
*atexit_chain
= 0;
2963 static long atexit_chain_length
= 0;
2964 static volatile long last_atexit_chain_slot
= -1;
2967 atexit (func_ptr func
)
2969 if (++last_atexit_chain_slot
== atexit_chain_length
)
2971 atexit_chain_length
+= 32;
2973 atexit_chain
= (func_ptr
*) realloc (atexit_chain
, atexit_chain_length
2974 * sizeof (func_ptr
));
2976 atexit_chain
= (func_ptr
*) malloc (atexit_chain_length
2977 * sizeof (func_ptr
));
2980 atexit_chain_length
= 0;
2981 last_atexit_chain_slot
= -1;
2986 atexit_chain
[last_atexit_chain_slot
] = func
;
2990 extern void _cleanup ();
2991 extern void _exit () __attribute__ ((noreturn
));
2998 for ( ; last_atexit_chain_slot
-- >= 0; )
3000 (*atexit_chain
[last_atexit_chain_slot
+ 1]) ();
3001 atexit_chain
[last_atexit_chain_slot
+ 1] = 0;
3003 free (atexit_chain
);
3016 /* Simple; we just need a wrapper for ON_EXIT. */
3018 atexit (func_ptr func
)
3020 return ON_EXIT (func
);
3023 #endif /* ON_EXIT */
3024 #endif /* NEED_ATEXIT */
3032 /* Shared exception handling support routines. */
3034 extern void __default_terminate (void) __attribute__ ((__noreturn__
));
3037 __default_terminate ()
3042 void (*__terminate_func
)() = __default_terminate
;
3047 (*__terminate_func
)();
3051 __throw_type_match (void *catch_type
, void *throw_type
, void *obj
)
3054 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3055 catch_type
, throw_type
);
3057 if (strcmp ((const char *)catch_type
, (const char *)throw_type
) == 0)
3068 /* Include definitions of EH context and table layout */
3070 #include "eh-common.h"
3071 #ifndef inhibit_libc
3075 /* Allocate and return a new EH context structure. */
3077 extern void __throw ();
3082 struct eh_full_context
{
3083 struct eh_context c
;
3085 } *ehfc
= (struct eh_full_context
*) malloc (sizeof *ehfc
);
3090 memset (ehfc
, 0, sizeof *ehfc
);
3092 ehfc
->c
.dynamic_handler_chain
= (void **) ehfc
->top_elt
;
3094 /* This should optimize out entirely. This should always be true,
3095 but just in case it ever isn't, don't allow bogus code to be
3098 if ((void*)(&ehfc
->c
) != (void*)ehfc
)
3105 static __gthread_key_t eh_context_key
;
3107 /* Destructor for struct eh_context. */
3109 eh_context_free (void *ptr
)
3111 __gthread_key_dtor (eh_context_key
, ptr
);
3117 /* Pointer to function to return EH context. */
3119 static struct eh_context
*eh_context_initialize ();
3120 static struct eh_context
*eh_context_static ();
3122 static struct eh_context
*eh_context_specific ();
3125 static struct eh_context
*(*get_eh_context
) () = &eh_context_initialize
;
3127 /* Routine to get EH context.
3128 This one will simply call the function pointer. */
3133 return (void *) (*get_eh_context
) ();
3136 /* Get and set the language specific info pointer. */
3141 struct eh_context
*eh
= (*get_eh_context
) ();
3145 #ifdef DWARF2_UNWIND_INFO
3146 static int dwarf_reg_size_table_initialized
= 0;
3147 static char dwarf_reg_size_table
[FIRST_PSEUDO_REGISTER
];
3150 init_reg_size_table ()
3152 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table
);
3153 dwarf_reg_size_table_initialized
= 1;
3159 eh_threads_initialize ()
3161 /* Try to create the key. If it fails, revert to static method,
3162 otherwise start using thread specific EH contexts. */
3163 if (__gthread_key_create (&eh_context_key
, &eh_context_free
) == 0)
3164 get_eh_context
= &eh_context_specific
;
3166 get_eh_context
= &eh_context_static
;
3168 #endif /* no __GTHREADS */
3170 /* Initialize EH context.
3171 This will be called only once, since we change GET_EH_CONTEXT
3172 pointer to another routine. */
3174 static struct eh_context
*
3175 eh_context_initialize ()
3179 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
3180 /* Make sure that get_eh_context does not point to us anymore.
3181 Some systems have dummy thread routines in their libc that
3182 return a success (Solaris 2.6 for example). */
3183 if (__gthread_once (&once
, eh_threads_initialize
) != 0
3184 || get_eh_context
== &eh_context_initialize
)
3186 /* Use static version of EH context. */
3187 get_eh_context
= &eh_context_static
;
3189 #ifdef DWARF2_UNWIND_INFO
3191 static __gthread_once_t once_regsizes
= __GTHREAD_ONCE_INIT
;
3192 if (__gthread_once (&once_regsizes
, init_reg_size_table
) != 0
3193 || ! dwarf_reg_size_table_initialized
)
3194 init_reg_size_table ();
3198 #else /* no __GTHREADS */
3200 /* Use static version of EH context. */
3201 get_eh_context
= &eh_context_static
;
3203 #ifdef DWARF2_UNWIND_INFO
3204 init_reg_size_table ();
3207 #endif /* no __GTHREADS */
3209 return (*get_eh_context
) ();
3212 /* Return a static EH context. */
3214 static struct eh_context
*
3215 eh_context_static ()
3217 static struct eh_context eh
;
3218 static int initialized
;
3219 static void *top_elt
[2];
3224 memset (&eh
, 0, sizeof eh
);
3225 eh
.dynamic_handler_chain
= top_elt
;
3231 /* Return a thread specific EH context. */
3233 static struct eh_context
*
3234 eh_context_specific ()
3236 struct eh_context
*eh
;
3237 eh
= (struct eh_context
*) __gthread_getspecific (eh_context_key
);
3240 eh
= new_eh_context ();
3241 if (__gthread_setspecific (eh_context_key
, (void *) eh
) != 0)
3249 /* Support routines for setjmp/longjmp exception handling. */
3251 /* Calls to __sjthrow are generated by the compiler when an exception
3252 is raised when using the setjmp/longjmp exception handling codegen
3255 #ifdef DONT_USE_BUILTIN_SETJMP
3256 extern void longjmp (void *, int);
3259 /* Routine to get the head of the current thread's dynamic handler chain
3260 use for exception handling. */
3263 __get_dynamic_handler_chain ()
3265 struct eh_context
*eh
= (*get_eh_context
) ();
3266 return &eh
->dynamic_handler_chain
;
3269 /* This is used to throw an exception when the setjmp/longjmp codegen
3270 method is used for exception handling.
3272 We call __terminate if there are no handlers left. Otherwise we run the
3273 cleanup actions off the dynamic cleanup stack, and pop the top of the
3274 dynamic handler chain, and use longjmp to transfer back to the associated
3277 extern void __sjthrow (void) __attribute__ ((__noreturn__
));
3282 struct eh_context
*eh
= (*get_eh_context
) ();
3283 void ***dhc
= &eh
->dynamic_handler_chain
;
3285 void (*func
)(void *, int);
3289 /* The cleanup chain is one word into the buffer. Get the cleanup
3291 cleanup
= (void***)&(*dhc
)[1];
3293 /* If there are any cleanups in the chain, run them now. */
3297 void **buf
= (void**)store
;
3302 #ifdef DONT_USE_BUILTIN_SETJMP
3303 if (! setjmp (&buf
[2]))
3305 if (! __builtin_setjmp (&buf
[2]))
3311 func
= (void(*)(void*, int))cleanup
[0][1];
3312 arg
= (void*)cleanup
[0][2];
3314 /* Update this before running the cleanup. */
3315 cleanup
[0] = (void **)cleanup
[0][0];
3328 /* We must call terminate if we try and rethrow an exception, when
3329 there is no exception currently active and when there are no
3331 if (! eh
->info
|| (*dhc
)[0] == 0)
3334 /* Find the jmpbuf associated with the top element of the dynamic
3335 handler chain. The jumpbuf starts two words into the buffer. */
3336 jmpbuf
= &(*dhc
)[2];
3338 /* Then we pop the top element off the dynamic handler chain. */
3339 *dhc
= (void**)(*dhc
)[0];
3341 /* And then we jump to the handler. */
3343 #ifdef DONT_USE_BUILTIN_SETJMP
3344 longjmp (jmpbuf
, 1);
3346 __builtin_longjmp (jmpbuf
, 1);
3350 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3351 handler, then pop the handler off the dynamic handler stack, and
3352 then throw. This is used to skip the first handler, and transfer
3353 control to the next handler in the dynamic handler stack. */
3355 extern void __sjpopnthrow (void) __attribute__ ((__noreturn__
));
3360 struct eh_context
*eh
= (*get_eh_context
) ();
3361 void ***dhc
= &eh
->dynamic_handler_chain
;
3362 void (*func
)(void *, int);
3366 /* The cleanup chain is one word into the buffer. Get the cleanup
3368 cleanup
= (void***)&(*dhc
)[1];
3370 /* If there are any cleanups in the chain, run them now. */
3374 void **buf
= (void**)store
;
3379 #ifdef DONT_USE_BUILTIN_SETJMP
3380 if (! setjmp (&buf
[2]))
3382 if (! __builtin_setjmp (&buf
[2]))
3388 func
= (void(*)(void*, int))cleanup
[0][1];
3389 arg
= (void*)cleanup
[0][2];
3391 /* Update this before running the cleanup. */
3392 cleanup
[0] = (void **)cleanup
[0][0];
3405 /* Then we pop the top element off the dynamic handler chain. */
3406 *dhc
= (void**)(*dhc
)[0];
3411 /* Support code for all exception region-based exception handling. */
3414 __eh_rtime_match (void *rtime
)
3417 __eh_matcher matcher
;
3420 info
= *(__get_eh_info ());
3421 matcher
= ((__eh_info
*)info
)->match_function
;
3424 #ifndef inhibit_libc
3425 fprintf (stderr
, "Internal Compiler Bug: No runtime type matcher.");
3429 ret
= (*matcher
) (info
, rtime
, (void *)0);
3430 return (ret
!= NULL
);
3433 /* This value identifies the place from which an exception is being
3436 #ifdef EH_TABLE_LOOKUP
3442 #ifdef DWARF2_UNWIND_INFO
3444 /* Return the table version of an exception descriptor */
3447 __get_eh_table_version (exception_descriptor
*table
)
3449 return table
->lang
.version
;
3452 /* Return the originating table language of an exception descriptor */
3455 __get_eh_table_language (exception_descriptor
*table
)
3457 return table
->lang
.language
;
3460 /* This routine takes a PC and a pointer to the exception region TABLE for
3461 its translation unit, and returns the address of the exception handler
3462 associated with the closest exception table handler entry associated
3463 with that PC, or 0 if there are no table entries the PC fits in.
3465 In the advent of a tie, we have to give the last entry, as it represents
3469 old_find_exception_handler (void *pc
, old_exception_table
*table
)
3476 /* We can't do a binary search because the table isn't guaranteed
3477 to be sorted from function to function. */
3478 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
3480 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
3482 /* This can apply. Make sure it is at least as small as
3483 the previous best. */
3484 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
3485 && table
[pos
].start_region
>= table
[best
].start_region
))
3488 /* But it is sorted by starting PC within a function. */
3489 else if (best
>= 0 && table
[pos
].start_region
> pc
)
3493 return table
[best
].exception_handler
;
3499 /* find_exception_handler finds the correct handler, if there is one, to
3500 handle an exception.
3501 returns a pointer to the handler which controlled should be transferred
3502 to, or NULL if there is nothing left.
3504 PC - pc where the exception originates. If this is a rethrow,
3505 then this starts out as a pointer to the exception table
3506 entry we wish to rethrow out of.
3507 TABLE - exception table for the current module.
3508 EH_INFO - eh info pointer for this exception.
3509 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3510 CLEANUP - returned flag indicating whether this is a cleanup handler.
3513 find_exception_handler (void *pc
, exception_descriptor
*table
,
3514 __eh_info
*eh_info
, int rethrow
, int *cleanup
)
3517 void *retval
= NULL
;
3522 /* The new model assumed the table is sorted inner-most out so the
3523 first region we find which matches is the correct one */
3525 exception_table
*tab
= &(table
->table
[0]);
3527 /* Subtract 1 from the PC to avoid hitting the next region */
3530 /* pc is actually the region table entry to rethrow out of */
3531 pos
= ((exception_table
*) pc
) - tab
;
3532 pc
= ((exception_table
*) pc
)->end_region
- 1;
3534 /* The label is always on the LAST handler entry for a region,
3535 so we know the next entry is a different region, even if the
3536 addresses are the same. Make sure its not end of table tho. */
3537 if (tab
[pos
].start_region
!= (void *) -1)
3543 /* We can't do a binary search because the table is in inner-most
3544 to outermost address ranges within functions */
3545 for ( ; tab
[pos
].start_region
!= (void *) -1; pos
++)
3547 if (tab
[pos
].start_region
<= pc
&& tab
[pos
].end_region
> pc
)
3549 if (tab
[pos
].match_info
)
3551 __eh_matcher matcher
= eh_info
->match_function
;
3552 /* match info but no matcher is NOT a match */
3555 void *ret
= (*matcher
)((void *) eh_info
,
3556 tab
[pos
].match_info
, table
);
3560 retval
= tab
[pos
].exception_handler
;
3569 retval
= tab
[pos
].exception_handler
;
3576 #endif /* DWARF2_UNWIND_INFO */
3577 #endif /* EH_TABLE_LOOKUP */
3579 #ifdef DWARF2_UNWIND_INFO
3580 /* Support code for exception handling using static unwind information. */
3584 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3585 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3586 avoid a warning about casting between int and pointer of different
/* Pointer-wide integer used for the double cast in get_reg/put_reg.  */
typedef int ptr_type __attribute__ ((mode (pointer)));
3591 #ifdef INCOMING_REGNO
3592 /* Is the saved value for register REG in frame UDATA stored in a register
3593 window in the previous frame? */
3595 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3596 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3597 compiled functions won't work with the frame-unwind stuff here.
3598 Perhaps the entireity of in_reg_window should be conditional on having
3599 seen a DW_CFA_GNU_window_save? */
3600 #define target_flags 0
3603 in_reg_window (int reg
, frame_state
*udata
)
3605 if (udata
->saved
[reg
] == REG_SAVED_REG
)
3606 return INCOMING_REGNO (reg
) == reg
;
3607 if (udata
->saved
[reg
] != REG_SAVED_OFFSET
)
3610 #ifdef STACK_GROWS_DOWNWARD
3611 return udata
->reg_or_offset
[reg
] > 0;
3613 return udata
->reg_or_offset
[reg
] < 0;
3617 static inline int in_reg_window (int reg
, frame_state
*udata
) { return 0; }
3618 #endif /* INCOMING_REGNO */
3620 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3621 frame called by UDATA or 0. */
3624 get_reg_addr (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3626 while (udata
->saved
[reg
] == REG_SAVED_REG
)
3628 reg
= udata
->reg_or_offset
[reg
];
3629 if (in_reg_window (reg
, udata
))
3635 if (udata
->saved
[reg
] == REG_SAVED_OFFSET
)
3636 return (word_type
*)(udata
->cfa
+ udata
->reg_or_offset
[reg
]);
3641 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3642 frame called by UDATA or 0. */
3644 static inline void *
3645 get_reg (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3647 return (void *)(ptr_type
) *get_reg_addr (reg
, udata
, sub_udata
);
3650 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3653 put_reg (unsigned reg
, void *val
, frame_state
*udata
)
3655 *get_reg_addr (reg
, udata
, NULL
) = (word_type
)(ptr_type
) val
;
3658 /* Copy the saved value for register REG from frame UDATA to frame
3659 TARGET_UDATA. Unlike the previous two functions, this can handle
3660 registers that are not one word large. */
3663 copy_reg (unsigned reg
, frame_state
*udata
, frame_state
*target_udata
)
3665 word_type
*preg
= get_reg_addr (reg
, udata
, NULL
);
3666 word_type
*ptreg
= get_reg_addr (reg
, target_udata
, NULL
);
3668 memcpy (ptreg
, preg
, dwarf_reg_size_table
[reg
]);
3671 /* Retrieve the return address for frame UDATA. */
3673 static inline void *
3674 get_return_addr (frame_state
*udata
, frame_state
*sub_udata
)
3676 return __builtin_extract_return_addr
3677 (get_reg (udata
->retaddr_column
, udata
, sub_udata
));
3680 /* Overwrite the return address for frame UDATA with VAL. */
3683 put_return_addr (void *val
, frame_state
*udata
)
3685 val
= __builtin_frob_return_addr (val
);
3686 put_reg (udata
->retaddr_column
, val
, udata
);
3689 /* Given the current frame UDATA and its return address PC, return the
3690 information about the calling frame in CALLER_UDATA. */
3693 next_stack_level (void *pc
, frame_state
*udata
, frame_state
*caller_udata
)
3695 caller_udata
= __frame_state_for (pc
, caller_udata
);
3699 /* Now go back to our caller's stack frame. If our caller's CFA register
3700 was saved in our stack frame, restore it; otherwise, assume the CFA
3701 register is SP and restore it to our CFA value. */
3702 if (udata
->saved
[caller_udata
->cfa_reg
])
3703 caller_udata
->cfa
= get_reg (caller_udata
->cfa_reg
, udata
, 0);
3705 caller_udata
->cfa
= udata
->cfa
;
3706 caller_udata
->cfa
+= caller_udata
->cfa_offset
;
3708 return caller_udata
;
3711 /* Hook to call before __terminate if only cleanup handlers remain. */
/* Hook called before __terminate when only cleanup handlers remain, so a
   debugger can breakpoint here and examine the state at the throw point
   before the stack is unwound.  Empty parameter list tightened to (void)
   for consistency with the ANSI prototypes used elsewhere in this file.  */
void
__unwinding_cleanup (void)
{
}
3717 /* throw_helper performs some of the common grunt work for a throw. This
3718 routine is called by throw and rethrows. This is pretty much split
3719 out from the old __throw routine. An addition has been added which allows
3720 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3721 but cleanups remaining. This allows a debugger to examine the state
3722 at which the throw was executed, before any cleanups, rather than
3723 at the terminate point after the stack has been unwound.
3725 EH is the current eh_context structure.
3726 PC is the address of the call to __throw.
3727 MY_UDATA is the unwind information for __throw.
3728 OFFSET_P is where we return the SP adjustment offset. */
3731 throw_helper (eh
, pc
, my_udata
, offset_p
)
3732 struct eh_context
*eh
;
3734 frame_state
*my_udata
;
3737 frame_state ustruct2
, *udata
= &ustruct2
;
3738 frame_state ustruct
;
3739 frame_state
*sub_udata
= &ustruct
;
3740 void *saved_pc
= pc
;
3744 frame_state saved_ustruct
;
3747 int only_cleanup
= 0;
3749 int saved_state
= 0;
3751 __eh_info
*eh_info
= (__eh_info
*)eh
->info
;
3753 /* Do we find a handler based on a re-throw PC? */
3754 if (eh
->table_index
!= (void *) 0)
3757 memcpy (udata
, my_udata
, sizeof (*udata
));
3759 handler
= (void *) 0;
3762 frame_state
*p
= udata
;
3763 udata
= next_stack_level (pc
, udata
, sub_udata
);
3766 /* If we couldn't find the next frame, we lose. */
3770 if (udata
->eh_ptr
== NULL
)
3773 new_eh_model
= (((exception_descriptor
*)(udata
->eh_ptr
))->
3774 runtime_id_field
== NEW_EH_RUNTIME
);
3779 handler
= find_exception_handler (eh
->table_index
, udata
->eh_ptr
,
3780 eh_info
, 1, &cleanup
);
3781 eh
->table_index
= (void *)0;
3785 handler
= find_exception_handler (pc
, udata
->eh_ptr
, eh_info
,
3788 handler
= old_find_exception_handler (pc
, udata
->eh_ptr
);
3790 /* If we found one, we can stop searching, if its not a cleanup.
3791 for cleanups, we save the state, and keep looking. This allows
3792 us to call a debug hook if there are nothing but cleanups left. */
3799 saved_ustruct
= *udata
;
3800 handler_p
= handler
;
3813 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3814 hitting the beginning of the next region. */
3815 pc
= get_return_addr (udata
, sub_udata
) - 1;
3820 udata
= &saved_ustruct
;
3821 handler
= handler_p
;
3824 __unwinding_cleanup ();
3827 /* If we haven't found a handler by now, this is an unhandled
3832 eh
->handler_label
= handler
;
3834 args_size
= udata
->args_size
;
3837 /* We found a handler in the throw context, no need to unwind. */
3843 /* Unwind all the frames between this one and the handler by copying
3844 their saved register values into our register save slots. */
3846 /* Remember the PC where we found the handler. */
3847 void *handler_pc
= pc
;
3849 /* Start from the throw context again. */
3851 memcpy (udata
, my_udata
, sizeof (*udata
));
3853 while (pc
!= handler_pc
)
3855 frame_state
*p
= udata
;
3856 udata
= next_stack_level (pc
, udata
, sub_udata
);
3859 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; ++i
)
3860 if (i
!= udata
->retaddr_column
&& udata
->saved
[i
])
3862 /* If you modify the saved value of the return address
3863 register on the SPARC, you modify the return address for
3864 your caller's frame. Don't do that here, as it will
3865 confuse get_return_addr. */
3866 if (in_reg_window (i
, udata
)
3867 && udata
->saved
[udata
->retaddr_column
] == REG_SAVED_REG
3868 && udata
->reg_or_offset
[udata
->retaddr_column
] == i
)
3870 copy_reg (i
, udata
, my_udata
);
3873 pc
= get_return_addr (udata
, sub_udata
) - 1;
3876 /* But we do need to update the saved return address register from
3877 the last frame we unwind, or the handler frame will have the wrong
3879 if (udata
->saved
[udata
->retaddr_column
] == REG_SAVED_REG
)
3881 i
= udata
->reg_or_offset
[udata
->retaddr_column
];
3882 if (in_reg_window (i
, udata
))
3883 copy_reg (i
, udata
, my_udata
);
3886 /* udata now refers to the frame called by the handler frame. */
3888 /* We adjust SP by the difference between __throw's CFA and the CFA for
3889 the frame called by the handler frame, because those CFAs correspond
3890 to the SP values at the two call sites. We need to further adjust by
3891 the args_size of the handler frame itself to get the handler frame's
3892 SP from before the args were pushed for that call. */
3893 #ifdef STACK_GROWS_DOWNWARD
3894 *offset_p
= udata
->cfa
- my_udata
->cfa
+ args_size
;
3896 *offset_p
= my_udata
->cfa
- udata
->cfa
- args_size
;
3903 /* We first search for an exception handler, and if we don't find
3904 it, we call __terminate on the current stack frame so that we may
3905 use the debugger to walk the stack and understand why no handler
3908 If we find one, then we unwind the frames down to the one that
3909 has the handler and transfer control into the handler. */
3911 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
3916 struct eh_context
*eh
= (*get_eh_context
) ();
3920 /* XXX maybe make my_ustruct static so we don't have to look it up for
3922 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3924 /* This is required for C++ semantics. We must call terminate if we
3925 try and rethrow an exception, when there is no exception currently
3930 /* Start at our stack frame. */
3932 my_udata
= __frame_state_for (&&label
, my_udata
);
3936 /* We need to get the value from the CFA register. */
3937 my_udata
->cfa
= __builtin_dwarf_cfa ();
3939 /* Do any necessary initialization to access arbitrary stack frames.
3940 On the SPARC, this means flushing the register windows. */
3941 __builtin_unwind_init ();
3943 /* Now reset pc to the right throw point. */
3944 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3946 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
3950 __builtin_eh_return ((void *)eh
, offset
, handler
);
3952 /* Epilogue: restore the handler frame's register values and return
3956 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
3962 struct eh_context
*eh
= (*get_eh_context
) ();
3966 /* XXX maybe make my_ustruct static so we don't have to look it up for
3968 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3970 /* This is required for C++ semantics. We must call terminate if we
3971 try and rethrow an exception, when there is no exception currently
3976 /* This is the table index we want to rethrow from. The value of
3977 the END_REGION label is used for the PC of the throw, and the
3978 search begins with the next table entry. */
3979 eh
->table_index
= index
;
3981 /* Start at our stack frame. */
3983 my_udata
= __frame_state_for (&&label
, my_udata
);
3987 /* We need to get the value from the CFA register. */
3988 my_udata
->cfa
= __builtin_dwarf_cfa ();
3990 /* Do any necessary initialization to access arbitrary stack frames.
3991 On the SPARC, this means flushing the register windows. */
3992 __builtin_unwind_init ();
3994 /* Now reset pc to the right throw point. */
3995 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3997 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
4001 __builtin_eh_return ((void *)eh
, offset
, handler
);
4003 /* Epilogue: restore the handler frame's register values and return
4006 #endif /* DWARF2_UNWIND_INFO */
4011 #ifndef inhibit_libc
4012 /* This gets us __GNU_LIBRARY__. */
4013 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
4016 #ifdef __GNU_LIBRARY__
4017 /* Avoid forcing the library's meaning of `write' on the user program
4018 by using the "internal" name (for use within the library) */
4019 #define write(fd, buf, n) __write((fd), (buf), (n))
4021 #endif /* inhibit_libc */
4023 #define MESSAGE "pure virtual method called\n"
4025 extern void __terminate (void) __attribute__ ((__noreturn__
));
4030 #ifndef inhibit_libc
4031 write (2, MESSAGE
, sizeof (MESSAGE
) - 1);