/* More subroutines needed by GCC output code on some machines.  */
/* Compile this one with gcc.  */
/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* As a special exception, if you link this library with other files,
   some of which are compiled with GCC, to produce an executable,
   this library does not by itself cause the resulting executable
   to be covered by the GNU General Public License.
   This exception does not however invalidate any other reasons why
   the executable file might be covered by the GNU General Public License.  */

/* It is incorrect to include config.h here, because this file is being
   compiled for the target, and hence definitions concerning only the host
   do not apply.  */

/* We disable this when inhibit_libc, so that gcc can still be built without
   needing header files first.  */
/* ??? This is not a good solution, since prototypes may be required in
   some cases for correct code.  See also frame.c.  */

/* fixproto guarantees these system headers exist. */

extern void *malloc (size_t);

extern void free (void *);

extern int atexit (void (*)(void));
#if !defined(L_trampoline) && !defined(inhibit_libc)

/* Don't use `fancy_abort' here even if config.h says to use it.  */

/* In a cross-compilation situation, default to inhibiting compilation
   of routines that use libc.  */

#if defined(CROSS_COMPILE) && !defined(inhibit_libc)

/* Permit the tm.h file to select the endianness to use just for this
   file.  This is used when the endianness is determined when the
   compiler is run.  */

#ifndef LIBGCC2_WORDS_BIG_ENDIAN
#define LIBGCC2_WORDS_BIG_ENDIAN WORDS_BIG_ENDIAN
#endif

#ifndef LIBGCC2_LONG_DOUBLE_TYPE_SIZE
#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
#endif
/* In the first part of this file, we are interfacing to calls generated
   by the compiler itself.  These calls pass values into these routines
   which have very specific modes (rather than very specific types), and
   these compiler-generated calls also expect any return values to have
   very specific modes (rather than very specific types).  Thus, we need
   to avoid using regular C language type names in this part of the file
   because the sizes for those types can be configured to be anything.
   Instead we use the following special type names.  */
typedef unsigned int UQItype  __attribute__ ((mode (QI)));
typedef          int SItype   __attribute__ ((mode (SI)));
typedef unsigned int USItype  __attribute__ ((mode (SI)));
typedef          int DItype   __attribute__ ((mode (DI)));
typedef unsigned int UDItype  __attribute__ ((mode (DI)));

typedef        float SFtype   __attribute__ ((mode (SF)));
typedef        float DFtype   __attribute__ ((mode (DF)));

#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
typedef        float XFtype   __attribute__ ((mode (XF)));
#endif
#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
typedef        float TFtype   __attribute__ ((mode (TF)));
#endif

typedef int word_type __attribute__ ((mode (__word__)));
/* Make sure that we don't accidentally use any normal C language built-in
   type names in the first part of this file.  Instead we want to use *only*
   the type names defined above.  The following macro definitions ensure
   that if we *do* accidentally use some normal C language built-in type name,
   we will get a syntax error.  */
#define char bogus_type
#define short bogus_type
#define int bogus_type
#define long bogus_type
#define unsigned bogus_type
#define float bogus_type
#define double bogus_type

#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT)

/* DIstructs are pairs of SItype values in the order determined by
   LIBGCC2_WORDS_BIG_ENDIAN.  */

#if LIBGCC2_WORDS_BIG_ENDIAN
  struct DIstruct {SItype high, low;};
#else
  struct DIstruct {SItype low, high;};
#endif

/* We need this union to unpack/pack DImode values, since we don't have
   any arithmetic yet.  Incoming DImode parameters are stored into the
   `ll' field, and the unpacked result is read from the struct `s'.  */
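/* Illustrative sketch (not part of libgcc proper): the packing union this
   comment describes would look roughly like the following, assuming the
   DIstruct layout defined above.  The actual typedef is elided in this
   excerpt, so this is kept compiled out.  */
#if 0
typedef union
{
  struct DIstruct s;    /* unpacked high/low SItype words */
  DItype ll;            /* the full DImode value */
} DIunion;
#endif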
#if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv) \
     || defined (L_divdi3) || defined (L_udivdi3) \
     || defined (L_moddi3) || defined (L_umoddi3))

#include "longlong.h"

#endif /* udiv or mul */

extern DItype __fixunssfdi (SFtype a);
extern DItype __fixunsdfdi (DFtype a);
#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
extern DItype __fixunsxfdi (XFtype a);
#endif
#if LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128
extern DItype __fixunstfdi (TFtype a);
#endif

#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
#if defined (L_divdi3) || defined (L_moddi3)

  w.s.high = -uu.s.high - ((USItype) w.s.low > 0);
/* Unless shift functions are defined with full ANSI prototypes,
   parameter b will be promoted to int if word_type is smaller than an int.  */

__lshrdi3 (DItype u, word_type b)

  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;

      w.s.low = (USItype) uu.s.high >> -bm;

      USItype carries = (USItype) uu.s.high << bm;

      w.s.high = (USItype) uu.s.high >> b;
      w.s.low = ((USItype) uu.s.low >> b) | carries;
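/* Illustrative sketch (not part of libgcc): how a 64-bit logical right shift
   decomposes into two 32-bit word operations, as the fragments above do via
   the high/low union fields.  <stdint.h> names are used for clarity; the
   helper name is an invention of this sketch.  Kept compiled out.  */
#if 0
#include <stdint.h>

static uint64_t
lshr64_by_words (uint64_t u, unsigned b)        /* assumes 0 < b < 32 */
{
  uint32_t low = (uint32_t) u, high = (uint32_t) (u >> 32);
  uint32_t carries = high << (32 - b);          /* bits moving into the low word */
  uint32_t new_high = high >> b;
  uint32_t new_low = (low >> b) | carries;
  return ((uint64_t) new_high << 32) | new_low;
}
#endif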
__ashldi3 (DItype u, word_type b)

  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;

      w.s.high = (USItype) uu.s.low << -bm;

      USItype carries = (USItype) uu.s.low >> bm;

      w.s.low = (USItype) uu.s.low << b;
      w.s.high = ((USItype) uu.s.high << b) | carries;

__ashrdi3 (DItype u, word_type b)

  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;

      /* w.s.high = 1..1 or 0..0 */
      w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
      w.s.low = uu.s.high >> -bm;

      USItype carries = (USItype) uu.s.high << bm;

      w.s.high = uu.s.high >> b;
      w.s.low = ((USItype) uu.s.low >> b) | carries;

  w.s.low = ffs (uu.s.low);

  w.s.low = ffs (uu.s.high);

  w.s.low += BITS_PER_UNIT * sizeof (SItype);
__muldi3 (DItype u, DItype v)

  w.ll = __umulsidi3 (uu.s.low, vv.s.low);
  w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
               + (USItype) uu.s.high * (USItype) vv.s.low);
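/* Illustrative sketch (not part of libgcc): the identity __muldi3 relies on.
   With u = uh*2^32 + ul and v = vh*2^32 + vl, only the low 64 bits of the
   product are kept, so (u*v) mod 2^64 is the full 64-bit product of the low
   words plus the two cross terms folded into the high word, exactly as the
   fragment above does.  <stdint.h> names; kept compiled out.  */
#if 0
#include <stdint.h>

static uint64_t
mul64_by_words (uint64_t u, uint64_t v)
{
  uint32_t ul = (uint32_t) u, uh = (uint32_t) (u >> 32);
  uint32_t vl = (uint32_t) v, vh = (uint32_t) (v >> 32);
  uint64_t w = (uint64_t) ul * vl;                      /* __umulsidi3 (ul, vl) */
  w += (uint64_t) (uint32_t) (ul * vh + uh * vl) << 32; /* cross terms, high word only */
  return w;
}
#endif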
#if defined (sdiv_qrnnd)

__udiv_w_sdiv (USItype *rp, USItype a1, USItype a0, USItype d)

      if (a1 < d - a1 - (a0 >> (SI_TYPE_SIZE - 1)))

          /* dividend, divisor, and quotient are nonnegative */
          sdiv_qrnnd (q, r, a1, a0, d);

          /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
          sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (SI_TYPE_SIZE - 1));
          /* Divide (c1*2^32 + c0) by d */
          sdiv_qrnnd (q, r, c1, c0, d);
          /* Add 2^31 to quotient */
          q += (USItype) 1 << (SI_TYPE_SIZE - 1);

      b1 = d >> 1;                      /* d/2, between 2^30 and 2^31 - 1 */
      c1 = a1 >> 1;                     /* A/2 */
      c0 = (a1 << (SI_TYPE_SIZE - 1)) + (a0 >> 1);

      if (a1 < b1)                      /* A < 2^32*b1, so A/2 < 2^31*b1 */

          sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */

          r = 2*r + (a0 & 1);           /* Remainder from A/(2*b1) */

      else if (c1 < b1)                 /* So 2^31 <= (A/2)/b1 < 2^32 */

          c0 = ~c0;                     /* logical NOT */

          sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */

          q = ~q;                       /* (A/2)/b1 */

          r = 2*r + (a0 & 1);           /* A/(2*b1) */

      else                              /* Implies c1 = b1 */
        {                               /* Hence a1 = d - 1 = 2*b1 - 1 */

/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv.  */

__udiv_w_sdiv (USItype *rp __attribute__ ((__unused__)),
               USItype a1 __attribute__ ((__unused__)),
               USItype a0 __attribute__ ((__unused__)),
               USItype d __attribute__ ((__unused__)))
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3))

static const UQItype __clz_tab[] =
{
  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
};
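/* Illustrative sketch (not part of libgcc): how longlong.h's generic
   count_leading_zeros can use a byte table like __clz_tab, which maps a byte
   value to the number of significant bits in it.  After locating the highest
   non-zero byte, the leading-zero count falls out directly.  Kept compiled
   out; the helper name is an invention of this sketch.  */
#if 0
static int
clz32_with_table (USItype x)            /* assumes x != 0 */
{
  USItype shift;

  if (x >= (USItype) 1 << 24)      shift = 24;
  else if (x >= (USItype) 1 << 16) shift = 16;
  else if (x >= (USItype) 1 << 8)  shift = 8;
  else                             shift = 0;

  /* __clz_tab[v] is the number of significant bits in the byte v.  */
  return 32 - (__clz_tab[x >> shift] + shift);
}
#endif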
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3))

__udivmoddi4 (UDItype n, UDItype d, UDItype *rp)

  USItype d0, d1, n0, n1, n2;

#if !UDIV_NEEDS_NORMALIZATION

          udiv_qrnnd (q0, n0, n1, n0, d0);

          /* Remainder in n0.  */

            d0 = 1 / d0;        /* Divide intentionally by zero.  */

          udiv_qrnnd (q1, n1, 0, n1, d0);
          udiv_qrnnd (q0, n0, n1, n0, d0);

          /* Remainder in n0.  */
#else /* UDIV_NEEDS_NORMALIZATION */

          count_leading_zeros (bm, d0);

              /* Normalize, i.e. make the most significant bit of the
                 denominator set.  */

              n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));

          udiv_qrnnd (q0, n0, n1, n0, d0);

          /* Remainder in n0 >> bm.  */

            d0 = 1 / d0;        /* Divide intentionally by zero.  */

          count_leading_zeros (bm, d0);

              /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
                 conclude (the most significant bit of n1 is set) /\ (the
                 leading quotient digit q1 = 1).

                 This special case is necessary, not an optimization.
                 (Shift counts of SI_TYPE_SIZE are undefined.)  */

              b = SI_TYPE_SIZE - bm;

              n1 = (n1 << bm) | (n0 >> b);

              udiv_qrnnd (q1, n1, n2, n1, d0);

          udiv_qrnnd (q0, n0, n1, n0, d0);

          /* Remainder in n0 >> bm.  */

#endif /* UDIV_NEEDS_NORMALIZATION */
      /* Remainder in n1n0.  */

          count_leading_zeros (bm, d1);

              /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
                 conclude (the most significant bit of n1 is set) /\ (the
                 quotient digit q0 = 0 or 1).

                 This special case is necessary, not an optimization.  */

              /* The condition on the next line takes advantage of that
                 n1 >= d1 (true due to program flow).  */
              if (n1 > d1 || n0 >= d0)

                  sub_ddmmss (n1, n0, n1, n0, d1, d0);

              b = SI_TYPE_SIZE - bm;

              d1 = (d1 << bm) | (d0 >> b);

              n1 = (n1 << bm) | (n0 >> b);

              udiv_qrnnd (q0, n1, n2, n1, d1);
              umul_ppmm (m1, m0, q0, d0);

              if (m1 > n1 || (m1 == n1 && m0 > n0))

                  sub_ddmmss (m1, m0, m1, m0, d1, d0);

              /* Remainder in (n1n0 - m1m0) >> bm.  */

                  sub_ddmmss (n1, n0, n1, n0, m1, m0);
                  rr.s.low = (n1 << b) | (n0 >> bm);
                  rr.s.high = n1 >> bm;
UDItype __udivmoddi4 ();

__divdi3 (DItype u, DItype v)

    uu.ll = __negdi2 (uu.ll);

    vv.ll = __negdi2 (vv.ll);

  w = __udivmoddi4 (uu.ll, vv.ll, (UDItype *) 0);

UDItype __udivmoddi4 ();

__moddi3 (DItype u, DItype v)

    uu.ll = __negdi2 (uu.ll);

    vv.ll = __negdi2 (vv.ll);

  (void) __udivmoddi4 (uu.ll, vv.ll, &w);

UDItype __udivmoddi4 ();

__umoddi3 (UDItype u, UDItype v)

  (void) __udivmoddi4 (u, v, &w);

UDItype __udivmoddi4 ();

__udivdi3 (UDItype n, UDItype d)

  return __udivmoddi4 (n, d, (UDItype *) 0);
__cmpdi2 (DItype a, DItype b)

  au.ll = a, bu.ll = b;

  if (au.s.high < bu.s.high)

  else if (au.s.high > bu.s.high)

  if ((USItype) au.s.low < (USItype) bu.s.low)

  else if ((USItype) au.s.low > (USItype) bu.s.low)

__ucmpdi2 (DItype a, DItype b)

  au.ll = a, bu.ll = b;

  if ((USItype) au.s.high < (USItype) bu.s.high)

  else if ((USItype) au.s.high > (USItype) bu.s.high)

  if ((USItype) au.s.low < (USItype) bu.s.low)

  else if ((USItype) au.s.low > (USItype) bu.s.low)
#if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

__fixunstfdi (TFtype a)

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DItype!),
     and shift it into the high word.  */

  /* Remove high part from the TFtype, leaving the low part as flonum.  */

  /* Convert that to fixed (but not to DItype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */

    v -= (USItype) (- a);

#if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)

    return - __fixunstfdi (-a);
  return __fixunstfdi (a);

#if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

__fixunsxfdi (XFtype a)

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DItype!),
     and shift it into the high word.  */

  /* Remove high part from the XFtype, leaving the low part as flonum.  */

  /* Convert that to fixed (but not to DItype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */

    v -= (USItype) (- a);

#if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)

    return - __fixunsxfdi (-a);
  return __fixunsxfdi (a);

#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

__fixunsdfdi (DFtype a)

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DItype!),
     and shift it into the high word.  */

  /* Remove high part from the DFtype, leaving the low part as flonum.  */

  /* Convert that to fixed (but not to DItype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */

    v -= (USItype) (- a);
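/* Illustrative sketch (not part of libgcc): the word-at-a-time conversion the
   fixuns*di routines above use.  The value is split at 2^32; the high word is
   converted first, subtracted back out, and the (possibly negative) remainder
   supplies the low word.  The positive branch is an assumption of this
   sketch, since only the negative branch appears in the excerpt.  Kept
   compiled out; <stdint.h> names used for clarity.  */
#if 0
#include <stdint.h>

static uint64_t
fixuns_double_to_u64 (double a)         /* assumes 0 <= a < 2^64 */
{
  double b = a / 4294967296.0;          /* HIGH_WORD_COEFF = 2^32 */
  uint32_t hi = (uint32_t) b;
  uint64_t v = (uint64_t) hi << 32;

  a -= (double) v;                      /* remove the high part */
  if (a < 0)                            /* rounding may make it negative */
    v -= (uint32_t) (- a);
  else
    v += (uint32_t) a;
  return v;
}
#endif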
    return - __fixunsdfdi (-a);
  return __fixunsdfdi (a);
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

__fixunssfdi (SFtype original_a)

  /* Convert the SFtype to a DFtype, because that is surely not going
     to lose any bits.  Some day someone else can write a faster version
     that avoids converting to DFtype, and verify it really works right.  */
  DFtype a = original_a;

  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DItype!),
     and shift it into the high word.  */

  /* Remove high part from the DFtype, leaving the low part as flonum.  */

  /* Convert that to fixed (but not to DItype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */

    v -= (USItype) (- a);

    return - __fixunssfdi (-a);
  return __fixunssfdi (a);

#if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

__floatdixf (DItype u)

  d = (SItype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (USItype) (u & (HIGH_WORD_COEFF - 1));

#if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

__floatditf (DItype u)

  d = (SItype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (USItype) (u & (HIGH_WORD_COEFF - 1));

#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)

__floatdidf (DItype u)

  d = (SItype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (USItype) (u & (HIGH_WORD_COEFF - 1));
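/* Illustrative sketch (not part of libgcc): what the __floatdi* routines
   above compute.  The signed high word is scaled by 2^32 in two 2^16 steps
   (HIGH_HALFWORD_COEFF applied twice) -- presumably so that no DImode
   constant has to be converted to floating point, which is the very
   operation being defined -- and the low word is then added in as an
   unsigned value.  Kept compiled out; <stdint.h> names used for clarity.  */
#if 0
#include <stdint.h>

static double
float_i64_to_double (int64_t u)
{
  double d = (int32_t) (u >> 32);       /* signed high word */
  d *= 65536.0;                         /* HIGH_HALFWORD_COEFF */
  d *= 65536.0;                         /* ...applied twice = 2^32 */
  d += (uint32_t) (u & 0xffffffff);     /* unsigned low word */
  return d;
}
#endif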
#define WORD_SIZE (sizeof (SItype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
#define DI_SIZE (sizeof (DItype) * BITS_PER_UNIT)

/* Define codes for all the float formats that we know of.  Note
   that this is copied from real.h.  */

#define UNKNOWN_FLOAT_FORMAT 0
#define IEEE_FLOAT_FORMAT 1
#define VAX_FLOAT_FORMAT 2
#define IBM_FLOAT_FORMAT 3

/* Default to IEEE float if not specified.  Nearly all machines use it.  */
#ifndef HOST_FLOAT_FORMAT
#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
#endif

#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT

#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT

__floatdisf (DItype u)

  /* Do the calculation in DFmode
     so that we don't lose any of the precision of the high word
     while multiplying it.  */

  /* Protect against double-rounding error.
     Represent any low-order bits, that might be truncated in DFmode,
     by a bit that won't be lost.  The bit can go in anywhere below the
     rounding position of the SFmode.  A fixed mask and bit position
     handles all usual configurations.  It doesn't handle the case
     of 128-bit DImode, however.  */
  if (DF_SIZE < DI_SIZE
      && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))

#define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
      if (! (- ((DItype) 1 << DF_SIZE) < u
             && u < ((DItype) 1 << DF_SIZE)))

          if ((USItype) u & (REP_BIT - 1))

  f = (SItype) (u >> WORD_SIZE);
  f *= HIGH_HALFWORD_COEFF;
  f *= HIGH_HALFWORD_COEFF;
  f += (USItype) (u & (HIGH_WORD_COEFF - 1));

#if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
/* Reenable the normal types, in case limits.h needs them.  */

__fixunsxfsi (XFtype a)

  if (a >= - (DFtype) LONG_MIN)
    return (SItype) (a + LONG_MIN) - LONG_MIN;

/* Reenable the normal types, in case limits.h needs them.  */

__fixunsdfsi (DFtype a)

  if (a >= - (DFtype) LONG_MIN)
    return (SItype) (a + LONG_MIN) - LONG_MIN;

/* Reenable the normal types, in case limits.h needs them.  */

__fixunssfsi (SFtype a)

  if (a >= - (SFtype) LONG_MIN)
    return (SItype) (a + LONG_MIN) - LONG_MIN;
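/* Illustrative sketch (not part of libgcc): the trick used by the __fixuns*si
   routines above.  Values >= 2^31 cannot be converted to a signed SItype
   directly, so 2^31 is first subtracted in floating point (adding LONG_MIN),
   the now in-range value is converted, and 2^31 is added back in integer
   arithmetic, where wraparound yields the desired unsigned bit pattern.
   Kept compiled out; the helper name is an invention of this sketch.  */
#if 0
static USItype
fixuns_sf_to_u32 (SFtype a)             /* assumes 0 <= a < 2^32 */
{
  if (a >= 2147483648.0)                /* i.e. a >= -(SFtype) LONG_MIN */
    return (SItype) (a - 2147483648.0) + (USItype) 2147483648u;
  return (SItype) a;
}
#endif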
/* From here on down, the routines use normal data types.  */

#define SItype bogus_type
#define USItype bogus_type
#define DItype bogus_type
#define UDItype bogus_type
#define SFtype bogus_type
#define DFtype bogus_type

/* Like bcmp except the sign is meaningful.
   Result is negative if S1 is less than S2,
   positive if S1 is greater, 0 if S1 and S2 are equal.  */

__gcc_bcmp (unsigned char *s1, unsigned char *s2, size_t size)

      unsigned char c1 = *s1++, c2 = *s2++;
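/* Illustrative sketch (not part of libgcc): a self-contained version of the
   byte-comparison loop sketched above, with the sign convention documented
   in the comment.  Kept compiled out; the helper name is an invention of
   this sketch.  */
#if 0
static int
bcmp_signed (const unsigned char *s1, const unsigned char *s2, size_t size)
{
  while (size-- > 0)
    {
      unsigned char c1 = *s1++, c2 = *s2++;
      if (c1 != c2)
        return c1 - c2;         /* negative if S1 < S2, positive if S1 > S2 */
    }
  return 0;
}
#endif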
1191 #if defined(__svr4__) || defined(__alliant__)
1195 /* The Alliant needs the added underscore. */
1196 asm (".globl __builtin_saveregs");
1197 asm ("__builtin_saveregs:");
1198 asm (".globl ___builtin_saveregs");
1199 asm ("___builtin_saveregs:");
1201 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1202 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1203 area and also for a new va_list
1205 /* Save all argument registers in the arg reg save area. The
1206 arg reg save area must have the following layout (according
1218 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1219 asm (" fst.q %f12,16(%sp)");
1221 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1222 asm (" st.l %r17,36(%sp)");
1223 asm (" st.l %r18,40(%sp)");
1224 asm (" st.l %r19,44(%sp)");
1225 asm (" st.l %r20,48(%sp)");
1226 asm (" st.l %r21,52(%sp)");
1227 asm (" st.l %r22,56(%sp)");
1228 asm (" st.l %r23,60(%sp)");
1229 asm (" st.l %r24,64(%sp)");
1230 asm (" st.l %r25,68(%sp)");
1231 asm (" st.l %r26,72(%sp)");
1232 asm (" st.l %r27,76(%sp)");
  asm ("  adds -96,%sp,%sp and adds 80,%sp,%r16"); /* compute the address of the new
                                      va_list structure.  Put it into
                                      r16 so that it will be returned
1239 /* Initialize all fields of the new va_list structure. This
1240 structure looks like:
1243 unsigned long ireg_used;
1244 unsigned long freg_used;
1250 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1251 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1252 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1253 asm (" bri %r1"); /* delayed return */
1254 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1256 #else /* not __svr4__ */
1257 #if defined(__PARAGON__)
1259 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1260 * and we stand a better chance of hooking into libraries
1261 * compiled by PGI. [andyp@ssd.intel.com]
1265 asm (".globl __builtin_saveregs");
1266 asm ("__builtin_saveregs:");
1267 asm (".globl ___builtin_saveregs");
1268 asm ("___builtin_saveregs:");
1270 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1271 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1272 area and also for a new va_list
1274 /* Save all argument registers in the arg reg save area. The
1275 arg reg save area must have the following layout (according
1287 asm (" fst.q f8, 0(sp)");
1288 asm (" fst.q f12,16(sp)");
1289 asm (" st.l r16,32(sp)");
1290 asm (" st.l r17,36(sp)");
1291 asm (" st.l r18,40(sp)");
1292 asm (" st.l r19,44(sp)");
1293 asm (" st.l r20,48(sp)");
1294 asm (" st.l r21,52(sp)");
1295 asm (" st.l r22,56(sp)");
1296 asm (" st.l r23,60(sp)");
1297 asm (" st.l r24,64(sp)");
1298 asm (" st.l r25,68(sp)");
1299 asm (" st.l r26,72(sp)");
1300 asm (" st.l r27,76(sp)");
  asm ("  adds 80,sp,r16");          /* compute the address of the new
                                        va_list structure.  Put it into
                                        r16 so that it will be returned
1307 /* Initialize all fields of the new va_list structure. This
1308 structure looks like:
1311 unsigned long ireg_used;
1312 unsigned long freg_used;
1318 asm (" st.l r0, 0(r16)"); /* nfixed */
1319 asm (" st.l r0, 4(r16)"); /* nfloating */
1320 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1321 asm (" bri r1"); /* delayed return */
1322 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1323 #else /* not __PARAGON__ */
1327 asm (".globl ___builtin_saveregs");
1328 asm ("___builtin_saveregs:");
1329 asm (" mov sp,r30");
1330 asm (" andnot 0x0f,sp,sp");
1331 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1333 /* Fill in the __va_struct. */
1334 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1335 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1336 asm (" st.l r18, 8(sp)");
1337 asm (" st.l r19,12(sp)");
1338 asm (" st.l r20,16(sp)");
1339 asm (" st.l r21,20(sp)");
1340 asm (" st.l r22,24(sp)");
1341 asm (" st.l r23,28(sp)");
1342 asm (" st.l r24,32(sp)");
1343 asm (" st.l r25,36(sp)");
1344 asm (" st.l r26,40(sp)");
1345 asm (" st.l r27,44(sp)");
1347 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1348 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1350 /* Fill in the __va_ctl. */
1351 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1352 asm (" st.l r28,84(sp)"); /* pointer to more args */
1353 asm (" st.l r0, 88(sp)"); /* nfixed */
1354 asm (" st.l r0, 92(sp)"); /* nfloating */
1356 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1358 asm (" mov r30,sp");
1359 /* recover stack and pass address to start
1361 #endif /* not __PARAGON__ */
1362 #endif /* not __svr4__ */
1363 #else /* not __i860__ */
1365 asm (".global __builtin_saveregs");
1366 asm ("__builtin_saveregs:");
1367 asm (".global ___builtin_saveregs");
1368 asm ("___builtin_saveregs:");
1369 #ifdef NEED_PROC_COMMAND
1372 asm ("st %i0,[%fp+68]");
1373 asm ("st %i1,[%fp+72]");
1374 asm ("st %i2,[%fp+76]");
1375 asm ("st %i3,[%fp+80]");
1376 asm ("st %i4,[%fp+84]");
1378 asm ("st %i5,[%fp+88]");
1379 #ifdef NEED_TYPE_COMMAND
1380 asm (".type __builtin_saveregs,#function");
1381 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1383 #else /* not __sparc__ */
1384 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1388 asm (" .set nomips16");
1390 asm (" .ent __builtin_saveregs");
1391 asm (" .globl __builtin_saveregs");
1392 asm ("__builtin_saveregs:");
1393 asm (" sw $4,0($30)");
1394 asm (" sw $5,4($30)");
1395 asm (" sw $6,8($30)");
1396 asm (" sw $7,12($30)");
1398 asm (" .end __builtin_saveregs");
1399 #else /* not __mips__, etc. */
1402 __builtin_saveregs ()
1407 #endif /* not __mips__ */
1408 #endif /* not __sparc__ */
1409 #endif /* not __i860__ */
1413 #ifndef inhibit_libc
1415 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
/* This is used by the `assert' macro.  */
extern void __eprintf (const char *, const char *, unsigned int, const char *)
  __attribute__ ((__noreturn__));

__eprintf (const char *string, const char *expression,
           unsigned int line, const char *filename)

  fprintf (stderr, string, expression, line, filename);

/* Structure emitted by -a  */

  const char *filename;

  const unsigned long *addresses;

  /* Older GCC's did not emit these fields.  */

  const char **functions;
  const long *line_nums;
  const char **filenames;

#ifdef BLOCK_PROFILER_CODE

#ifndef inhibit_libc

/* Simple minded basic block profiling output dumper for
   systems that don't provide tcov support.  At present,
   it requires atexit and stdio.  */

#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch.  */

#include "gbl-ctors.h"
#include "gcov-io.h"

static struct bb *bb_head;

static int num_digits (long value, int base) __attribute__ ((const));

/* Return the number of digits needed to print a value */
/* __inline__ */ static int num_digits (long value, int base)

  int minus = (value < 0 && base != 16);
  unsigned long v = (minus) ? -value : value;
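/* Illustrative sketch (not part of libgcc): a self-contained version of the
   digit-count helper declared above, following the same convention that a
   minus sign is counted only for negative decimal values.  Kept compiled
   out; the helper name is an invention of this sketch.  */
#if 0
static int
num_digits_sketch (long value, int base)
{
  int minus = (value < 0 && base != 16);
  unsigned long v = (minus) ? -value : value;
  int ret = minus;

  do
    {
      v /= base;
      ret++;
    }
  while (v);

  return ret;
}
#endif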
__bb_exit_func (void)

  FILE *da_file, *file;

  i = strlen (bb_head->filename) - 3;

  if (!strcmp (bb_head->filename+i, ".da"))

      /* Must be -fprofile-arcs not -a.
         Dump data in a form that gcov expects.  */

      for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)

          /* If the file exists, and the number of counts in it is the same,
             then merge them in.  */

          if ((da_file = fopen (ptr->filename, "r")) != 0)

              if (__read_long (&n_counts, da_file, 8) != 0)

                  fprintf (stderr, "arc profiling: Can't read output file %s.\n",

              if (n_counts == ptr->ncounts)

                  for (i = 0; i < n_counts; i++)

                      if (__read_long (&v, da_file, 8) != 0)

                          fprintf (stderr, "arc profiling: Can't read output file %s.\n",

                      ptr->counts[i] += v;

              if (fclose (da_file) == EOF)
                fprintf (stderr, "arc profiling: Error closing output file %s.\n",

          if ((da_file = fopen (ptr->filename, "w")) == 0)

              fprintf (stderr, "arc profiling: Can't open output file %s.\n",

          /* ??? Should first write a header to the file.  Preferably, a 4 byte
             magic number, 4 bytes containing the time the program was
             compiled, 4 bytes containing the last modification time of the
             source file, and 4 bytes indicating the compiler options used.

             That way we can easily verify that the proper source/executable/
             data file combination is being used from gcov.  */

          if (__write_long (ptr->ncounts, da_file, 8) != 0)

              fprintf (stderr, "arc profiling: Error writing output file %s.\n",

              long *count_ptr = ptr->counts;

              for (j = ptr->ncounts; j > 0; j--)

                  if (__write_long (*count_ptr, da_file, 8) != 0)

                      fprintf (stderr, "arc profiling: Error writing output file %s.\n",

          if (fclose (da_file) == EOF)
            fprintf (stderr, "arc profiling: Error closing output file %s.\n",

      /* Must be basic block profiling.  Emit a human readable output file.  */

      file = fopen ("bb.out", "a");
          /* This is somewhat type incorrect, but it avoids worrying about
             exactly where time.h is included from.  It should be ok unless
             a void * differs from other pointer formats, or if sizeof (long)
             is < sizeof (time_t).  It would be nice if we could assume the
             use of rationale standards here.  */

          time ((void *) &time_value);
          fprintf (file, "Basic block profiling finished on %s\n",
                   ctime ((void *) &time_value));

          /* We check the length field explicitly in order to allow compatibility
             with older GCC's which did not provide it.  */

          for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)

              int func_p    = (ptr->nwords >= sizeof (struct bb)
                               && ptr->nwords <= 1000
              int line_p    = (func_p && ptr->line_nums);
              int file_p    = (func_p && ptr->filenames);
              int addr_p    = (ptr->addresses != 0);
              long ncounts  = ptr->ncounts;

              int blk_len   = num_digits (ncounts, 10);

              fprintf (file, "File %s, %ld basic blocks \n\n",
                       ptr->filename, ncounts);

              /* Get max values for each field.  */
              for (i = 0; i < ncounts; i++)

                  if (cnt_max < ptr->counts[i])
                    cnt_max = ptr->counts[i];

                  if (addr_p && addr_max < ptr->addresses[i])
                    addr_max = ptr->addresses[i];

                  if (line_p && line_max < ptr->line_nums[i])
                    line_max = ptr->line_nums[i];

                      p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";

                      p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";

              addr_len = num_digits (addr_max, 16);
              cnt_len  = num_digits (cnt_max, 10);
              line_len = num_digits (line_max, 10);

              /* Now print out the basic block information.  */
              for (i = 0; i < ncounts; i++)

                           " Block #%*d: executed %*ld time(s)",

                           cnt_len, ptr->counts[i]);

                    fprintf (file, " address= 0x%.*lx", addr_len,

                    fprintf (file, " function= %-*s", func_len,
                             (ptr->functions[i]) ? ptr->functions[i] : "<none>");

                    fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);

                    fprintf (file, " file= %s",
                             (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");

                  fprintf (file, "\n");

              fprintf (file, "\n");

          fprintf (file, "\n\n");
__bb_init_func (struct bb *blocks)

  /* User is supposed to check whether the first word is non-0,
     but just in case....  */

  if (blocks->zero_word)

  /* Initialize destructor.  */

    atexit (__bb_exit_func);

  /* Set up linked list.  */
  blocks->zero_word = 1;
  blocks->next = bb_head;

#ifndef MACHINE_STATE_SAVE
#define MACHINE_STATE_SAVE(ID)
#endif
#ifndef MACHINE_STATE_RESTORE
#define MACHINE_STATE_RESTORE(ID)
#endif

/* Number of buckets in hashtable of basic block addresses.  */

#define BB_BUCKETS 311

/* Maximum length of string in file bb.in.  */

#define BBINBUFSIZE 500

/* BBINBUFSIZE-1 with double quotes.  We could use #BBINBUFSIZE or
   "BBINBUFSIZE" but want to avoid trouble with preprocessors.  */

#define BBINBUFSIZESTR "499"

  struct bb_edge *next;
  unsigned long src_addr;
  unsigned long dst_addr;
  unsigned long count;

  TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2

  struct bb_func *next;

  enum bb_func_mode mode;

/* This is the connection to the outside world.
   The BLOCK_PROFILER macro must set __bb.blocks
   and __bb.blockno.  */

  unsigned long blockno;

/* Vars to store addrs of source and destination basic blocks  */

static unsigned long bb_src = 0;
static unsigned long bb_dst = 0;

static FILE *bb_tracefile = (FILE *) 0;
static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
static struct bb_func *bb_func_head = (struct bb_func *) 0;
static unsigned long bb_callcount = 0;
static int bb_mode = 0;

static unsigned long *bb_stack = (unsigned long *) 0;
static size_t bb_stacksize = 0;

static int reported = 0;
   Always              : Print execution frequencies of basic blocks
                         to file bb.out.
   bb_mode & 1 != 0    : Dump trace of basic blocks to file bbtrace[.gz]
   bb_mode & 2 != 0    : Print jump frequencies to file bb.out.
   bb_mode & 4 != 0    : Cut call instructions from basic block flow.
   bb_mode & 8 != 0    : Insert return instructions in basic block flow.
   (The sketch following this comment shows how these bits relate to the
   keywords parsed from bb.in further below.)
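/* Illustrative sketch (not part of libgcc): the mapping from the bb.in
   keywords parsed further below to the bb_mode bits listed above, assuming
   the bit assignment given in that list.  Kept compiled out.  */
#if 0
static int
bb_mode_for_keyword (const char *p)
{
  if (!strcmp (p, "__bb_trace__"))    return 1; /* dump trace to bbtrace[.gz] */
  if (!strcmp (p, "__bb_jumps__"))    return 2; /* jump frequencies to bb.out */
  if (!strcmp (p, "__bb_hidecall__")) return 4; /* cut call instructions */
  if (!strcmp (p, "__bb_showret__"))  return 8; /* insert return instructions */
  return 0;
}
#endif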
/*#include <sys/types.h>*/
#include <sys/stat.h>
/*#include <malloc.h>*/

/* Commands executed by gopen.  */

#define GOPENDECOMPRESS "gzip -cd "
#define GOPENCOMPRESS "gzip -c >"

/* Like fopen but pipes through gzip.  mode may only be "r" or "w".
   If it does not compile, simply replace gopen by fopen and delete
   '.gz' from any first parameter to gopen.  */

gopen (char *fn, char *mode)

  if (mode[0] != 'r' && mode[0] != 'w')

  p = fn + strlen (fn)-1;
  use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
              || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));

          char *s = (char *) malloc (sizeof (char) * strlen (fn)
                                     + sizeof (GOPENDECOMPRESS));
          strcpy (s, GOPENDECOMPRESS);
          strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
          f = popen (s, mode);

          char *s = (char *) malloc (sizeof (char) * strlen (fn)
                                     + sizeof (GOPENCOMPRESS));
          strcpy (s, GOPENCOMPRESS);
          strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
          if (!(f = popen (s, mode)))
            f = fopen (s, mode);

    return fopen (fn, mode);

  if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))

#endif /* HAVE_POPEN */
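/* Illustrative sketch (not part of libgcc): the gzip-pipe idea behind gopen
   above, reduced to its essentials for the write case.  The helper name is
   an invention of this sketch; the real code also handles reading and the
   no-popen fallback.  Kept compiled out.  */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static FILE *
gzip_open_for_write (const char *fn)
{
  /* Build "gzip -c >FILE" and let gzip do the compression through a pipe.  */
  char *cmd = (char *) malloc (strlen (GOPENCOMPRESS) + strlen (fn) + 1);
  FILE *f;

  if (!cmd)
    return (FILE *) 0;
  strcpy (cmd, GOPENCOMPRESS);
  strcat (cmd, fn);
  f = popen (cmd, "w");
  free (cmd);
  return f;             /* close with pclose, not fclose */
}
#endif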
/* Called once per program.  */

__bb_exit_trace_func ()

  FILE *file = fopen ("bb.out", "a");

      gclose (bb_tracefile);

      fclose (bb_tracefile);
#endif /* HAVE_POPEN */

  /* Check functions in `bb.in'.  */

      const struct bb_func *p;
      int printed_something = 0;

      /* This is somewhat type incorrect.  */
      time ((void *) &time_value);

      for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)

          for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)

              if (!ptr->filename
                  || (p->filename != (char *) 0
                      && strcmp (p->filename, ptr->filename)))

              for (blk = 0; blk < ptr->ncounts; blk++)

                  if (!strcmp (p->funcname, ptr->functions[blk]))

              if (!printed_something)

                  fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n",
                           ctime ((void *) &time_value));
                  printed_something = 1;

              fprintf (file, "\tFunction %s", p->funcname);
                fprintf (file, " of file %s", p->filename);
              fprintf (file, "\n" );

      if (printed_something)
        fprintf (file, "\n");
  if (!bb_hashbuckets)

      fprintf (stderr, "Profiler: out of memory\n");

      unsigned long addr_max = 0;
      unsigned long cnt_max = 0;

      /* This is somewhat type incorrect, but it avoids worrying about
         exactly where time.h is included from.  It should be ok unless
         a void * differs from other pointer formats, or if sizeof (long)
         is < sizeof (time_t).  It would be nice if we could assume the
         use of rationale standards here.  */

      time ((void *) &time_value);
      fprintf (file, "Basic block jump tracing");

      switch (bb_mode & 12)

          fprintf (file, " (with call)");

          /* Print nothing.  */

          fprintf (file, " (with call & ret)");

          fprintf (file, " (with ret)");

      fprintf (file, " finished on %s\n", ctime ((void *) &time_value));

      for (i = 0; i < BB_BUCKETS; i++)

          struct bb_edge *bucket = bb_hashbuckets[i];
          for ( ; bucket; bucket = bucket->next)

              if (addr_max < bucket->src_addr)
                addr_max = bucket->src_addr;
              if (addr_max < bucket->dst_addr)
                addr_max = bucket->dst_addr;
              if (cnt_max < bucket->count)
                cnt_max = bucket->count;

      addr_len = num_digits (addr_max, 16);
      cnt_len  = num_digits (cnt_max, 10);

      for ( i = 0; i < BB_BUCKETS; i++)

          struct bb_edge *bucket = bb_hashbuckets[i];
          for ( ; bucket; bucket = bucket->next)

              fprintf (file, "Jump from block 0x%.*lx to "
                             "block 0x%.*lx executed %*lu time(s)\n",
                       addr_len, bucket->src_addr,
                       addr_len, bucket->dst_addr,
                       cnt_len, bucket->count);

      fprintf (file, "\n");

  /* Free allocated memory.  */

      struct bb_func *old = f;

      if (old->funcname) free (old->funcname);
      if (old->filename) free (old->filename);

      for (i = 0; i < BB_BUCKETS; i++)

          struct bb_edge *old, *bucket = bb_hashbuckets[i];

              bucket = bucket->next;

      free (bb_hashbuckets);

  for (b = bb_head; b; b = b->next)
    if (b->flags) free (b->flags);
/* Called once per program.  */

  char buf[BBINBUFSIZE];

  enum bb_func_mode m;

  /* Initialize destructor.  */
  atexit (__bb_exit_func);

  if (!(file = fopen ("bb.in", "r")))

  while (fscanf (file, " %" BBINBUFSIZESTR "s ", buf) != EOF)

      if (!strcmp (p, "__bb_trace__"))

      else if (!strcmp (p, "__bb_jumps__"))

      else if (!strcmp (p, "__bb_hidecall__"))

      else if (!strcmp (p, "__bb_showret__"))

          struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));

              f->next = bb_func_head;
              if ((pos = strchr (p, ':')))

                  if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))

                  strcpy (f->funcname, pos+1);

                  if ((f->filename = (char *) malloc (l+1)))

                      strncpy (f->filename, p, l);
                      f->filename[l] = '\0';

                    f->filename = (char *) 0;

                  if (!(f->funcname = (char *) malloc (strlen (p)+1)))

                  strcpy (f->funcname, p);
                  f->filename = (char *) 0;

      bb_tracefile = gopen ("bbtrace.gz", "w");

      bb_tracefile = fopen ("bbtrace", "w");
#endif /* HAVE_POPEN */

      bb_hashbuckets = (struct bb_edge **)
        malloc (BB_BUCKETS * sizeof (struct bb_edge *));

          /* Use a loop here rather than calling bzero to avoid having to
             conditionalize its existence.  */
          for (i = 0; i < BB_BUCKETS; i++)
            bb_hashbuckets[i] = 0;

      bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));

  /* Initialize destructor.  */
  atexit (__bb_exit_trace_func);
/* Called upon entering a basic block.  */

  struct bb_edge *bucket;

  MACHINE_STATE_SAVE("1")

  if (!bb_callcount
      || (__bb.blocks->flags
          && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))

  bb_dst = __bb.blocks->addresses[__bb.blockno];
  __bb.blocks->counts[__bb.blockno]++;

      fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);

      struct bb_edge **startbucket, **oldnext;

      oldnext = startbucket
        = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
      bucket = *startbucket;

      for (bucket = *startbucket; bucket;
           oldnext = &(bucket->next), bucket = *oldnext)

          if (bucket->src_addr == bb_src
              && bucket->dst_addr == bb_dst)

              *oldnext = bucket->next;
              bucket->next = *startbucket;
              *startbucket = bucket;

          bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));

              fprintf (stderr, "Profiler: out of memory\n");

              bucket->src_addr = bb_src;
              bucket->dst_addr = bb_dst;
              bucket->next = *startbucket;
              *startbucket = bucket;

  MACHINE_STATE_RESTORE("1")

/* Called when returning from a function and `__bb_showret__' is set.  */

__bb_trace_func_ret ()

  struct bb_edge *bucket;

  if (!bb_callcount
      || (__bb.blocks->flags
          && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))

      struct bb_edge **startbucket, **oldnext;

      oldnext = startbucket
        = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
      bucket = *startbucket;

      for (bucket = *startbucket; bucket;
           oldnext = &(bucket->next), bucket = *oldnext)

          if (bucket->src_addr == bb_dst
              && bucket->dst_addr == bb_src)

              *oldnext = bucket->next;
              bucket->next = *startbucket;
              *startbucket = bucket;

          bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));

              fprintf (stderr, "Profiler: out of memory\n");

              bucket->src_addr = bb_dst;
              bucket->dst_addr = bb_src;
              bucket->next = *startbucket;
              *startbucket = bucket;
/* Called upon entering the first function of a file.  */

__bb_init_file (struct bb *blocks)

  const struct bb_func *p;
  long blk, ncounts = blocks->ncounts;
  const char **functions = blocks->functions;

  /* Set up linked list.  */
  blocks->zero_word = 1;
  blocks->next = bb_head;

      || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))

  for (blk = 0; blk < ncounts; blk++)
    blocks->flags[blk] = 0;

  for (blk = 0; blk < ncounts; blk++)

      for (p = bb_func_head; p; p = p->next)

          if (!strcmp (p->funcname, functions[blk])
              && (!p->filename || !strcmp (p->filename, blocks->filename)))

            blocks->flags[blk] |= p->mode;

/* Called when exiting from a function.  */

  MACHINE_STATE_SAVE("2")

  if ((bb_mode & 12) && bb_stacksize > bb_callcount)

      bb_src = bb_stack[bb_callcount];

        __bb_trace_func_ret ();

  MACHINE_STATE_RESTORE("2")

/* Called when entering a function.  */

__bb_init_trace_func (struct bb *blocks, unsigned long blockno)

  static int trace_init = 0;

  MACHINE_STATE_SAVE("3")

  if (!blocks->zero_word)

          __bb_init_file (blocks);

      if (bb_callcount >= bb_stacksize)

          size_t newsize = bb_callcount + 100;

          bb_stack = (unsigned long *) realloc (bb_stack, newsize);

              fprintf (stderr, "Profiler: out of memory\n");

              goto stack_overflow;

          bb_stacksize = newsize;

      bb_stack[bb_callcount] = bb_src;

  else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))

      bb_stack[bb_callcount] = bb_src;

  MACHINE_STATE_RESTORE("3")

#endif /* not inhibit_libc */
#endif /* not BLOCK_PROFILER_CODE */

unsigned int __shtab[] = {
  0x00000001, 0x00000002, 0x00000004, 0x00000008,
  0x00000010, 0x00000020, 0x00000040, 0x00000080,
  0x00000100, 0x00000200, 0x00000400, 0x00000800,
  0x00001000, 0x00002000, 0x00004000, 0x00008000,
  0x00010000, 0x00020000, 0x00040000, 0x00080000,
  0x00100000, 0x00200000, 0x00400000, 0x00800000,
  0x01000000, 0x02000000, 0x04000000, 0x08000000,
  0x10000000, 0x20000000, 0x40000000, 0x80000000
};
#ifdef L_clear_cache
/* Clear part of an instruction cache.  */

#define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)

__clear_cache (char *beg, char *end)

#ifdef CLEAR_INSN_CACHE
  CLEAR_INSN_CACHE (beg, end);

#ifdef INSN_CACHE_SIZE
  static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
  static int initialized;

  typedef (*function_ptr) ();

#if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
  /* It's cheaper to clear the whole cache.
     Put in a series of jump instructions so that calling the beginning
     of the cache will clear the whole thing.  */

      int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
                 & -INSN_CACHE_LINE_WIDTH);
      int end_ptr = ptr + INSN_CACHE_SIZE;

      while (ptr < end_ptr)

          *(INSTRUCTION_TYPE *)ptr
            = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
          ptr += INSN_CACHE_LINE_WIDTH;

      *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;

  /* Call the beginning of the sequence.  */
  (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
                    & -INSN_CACHE_LINE_WIDTH))

#else /* Cache is large.  */

      int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
                 & -INSN_CACHE_LINE_WIDTH);

      while (ptr < (int) array + sizeof array)

          *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
          ptr += INSN_CACHE_LINE_WIDTH;

  /* Find the location in array that occupies the same cache line as BEG.  */

  offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
  start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
                 & -INSN_CACHE_PLANE_SIZE)

  /* Compute the cache alignment of the place to stop clearing.  */
#if 0 /* This is not needed for gcc's purposes.  */
  /* If the block to clear is bigger than a cache plane,
     we clear the entire cache, and OFFSET is already correct.  */
  if (end < beg + INSN_CACHE_PLANE_SIZE)

    offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
               & -INSN_CACHE_LINE_WIDTH)
              & (INSN_CACHE_PLANE_SIZE - 1));

#if INSN_CACHE_DEPTH > 1
  end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
  if (end_addr <= start_addr)
    end_addr += INSN_CACHE_PLANE_SIZE;

  for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)

      int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
      int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;

      while (addr != stop)

          /* Call the return instruction at ADDR.  */
          ((function_ptr) addr) ();

          addr += INSN_CACHE_LINE_WIDTH;

#else /* just one plane */

      /* Call the return instruction at START_ADDR.  */
      ((function_ptr) start_addr) ();

      start_addr += INSN_CACHE_LINE_WIDTH;

  while ((start_addr % INSN_CACHE_SIZE) != offset);
#endif /* just one plane */
#endif /* Cache is large */
#endif /* Cache exists */
#endif /* CLEAR_INSN_CACHE */

#endif /* L_clear_cache */
/* Jump to a trampoline, loading the static chain address.  */

#if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)

extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));

mprotect (char *addr, int len, int prot)

  if (VirtualProtect (addr, len, np, &op))

#endif /* WINNT && ! __CYGWIN__ && ! _UWIN */

#ifdef TRANSFER_FROM_TRAMPOLINE
TRANSFER_FROM_TRAMPOLINE

#if defined (NeXT) && defined (__MACH__)

/* Make stack executable so we can call trampolines on stack.
   This is called from INITIALIZE_TRAMPOLINE in next.h.  */

#include <mach/mach.h>

__enable_execute_stack (char *addr)

  char *eaddr = addr + TRAMPOLINE_SIZE;
  vm_address_t a = (vm_address_t) addr;

  /* turn on execute access on stack */
  r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
  if (r != KERN_SUCCESS)

      mach_error("vm_protect VM_PROT_ALL", r);

  /* We inline the i-cache invalidation for speed */

#ifdef CLEAR_INSN_CACHE
  CLEAR_INSN_CACHE (addr, eaddr);

  __clear_cache ((int) addr, (int) eaddr);

#endif /* defined (NeXT) && defined (__MACH__) */
/* Make stack executable so we can call trampolines on stack.
   This is called from INITIALIZE_TRAMPOLINE in convex.h.  */

#include <sys/mman.h>
#include <sys/vmparam.h>
#include <machine/machparam.h>

__enable_execute_stack ()

  static unsigned lowest = USRSTACK;
  unsigned current = (unsigned) &fp & -NBPG;

  if (lowest > current)

      unsigned len = lowest - current;
      mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);

  /* Clear instruction cache in case an old trampoline is in it.  */

#endif /* __convex__ */

/* Modified from the convex -code above.  */

#include <sys/param.h>

#include <sys/m88kbcs.h>

__enable_execute_stack ()

  static unsigned long lowest = USRSTACK;
  unsigned long current = (unsigned long) &save_errno & -NBPC;

  /* Ignore errno being set.  memctl sets errno to EINVAL whenever the
     address is seen as 'negative'.  That is the case with the stack.  */

  if (lowest > current)

      unsigned len = lowest - current;
      memctl (current, len, MCT_TEXT);

      memctl (current, NBPC, MCT_TEXT);

#endif /* __sysV88__ */

#include <sys/signal.h>

/* Motorola forgot to put memctl.o in the libp version of libc881.a,
   so define it here, because we need it in __clear_insn_cache below */
/* On older versions of this OS, no memctl or MCT_TEXT are defined;
   hence we enable this stuff only if MCT_TEXT is #define'd.  */

/* Clear instruction cache so we can call trampolines on stack.
   This is called from FINALIZE_TRAMPOLINE in mot3300.h.  */

__clear_insn_cache ()

  /* Preserve errno, because users would be surprised to have
     errno changing without explicitly calling any system-call.  */

  /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
     No need to use an address derived from _start or %sp, as 0 works also.  */
  memctl (0, 4096, MCT_TEXT);

#endif /* __sysV68__ */

#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch.  */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/vmmac.h>

/* Modified from the convex -code above.
   mremap promises to clear the i-cache.  */

__enable_execute_stack ()

  if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
                PROT_READ|PROT_WRITE|PROT_EXEC))

      perror ("mprotect in __enable_execute_stack");

#endif /* __pyr__ */

#if defined (sony_news) && defined (SYSTYPE_BSD)

#include <sys/types.h>
#include <sys/param.h>
#include <syscall.h>
#include <machine/sysnews.h>

/* cacheflush function for NEWS-OS 4.2.
   This function is called from trampoline-initialize code
   defined in config/mips/mips.h.  */

cacheflush (char *beg, int size, int flag)

  if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))

      perror ("cache_flush");

#endif /* sony_news */
#endif /* L_trampoline */
2826 #include "gbl-ctors.h"
2827 /* Some systems use __main in a way incompatible with its use in gcc, in these
2828 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2829 give the same symbol without quotes for an alternative entry point. You
2830 must define both, or neither. */
2832 #define NAME__MAIN "__main"
2833 #define SYMBOL__MAIN __main
2836 #ifdef INIT_SECTION_ASM_OP
2837 #undef HAS_INIT_SECTION
2838 #define HAS_INIT_SECTION
2841 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2843 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2844 code to run constructors. In that case, we need to handle EH here, too. */
2846 #ifdef EH_FRAME_SECTION
extern unsigned char __EH_FRAME_BEGIN__[];

/* Run all the global destructors on exit from the program.  */

__do_global_dtors ()

#ifdef DO_GLOBAL_DTORS_BODY
  DO_GLOBAL_DTORS_BODY;

  static func_ptr *p = __DTOR_LIST__ + 1;

#ifdef EH_FRAME_SECTION
  __deregister_frame_info (__EH_FRAME_BEGIN__);

#ifndef HAS_INIT_SECTION
/* Run all the global constructors on entry to the program.  */

__do_global_ctors ()

#ifdef EH_FRAME_SECTION

  static struct object object;
  __register_frame_info (__EH_FRAME_BEGIN__, &object);

  DO_GLOBAL_CTORS_BODY;
  atexit (__do_global_dtors);

#endif /* no HAS_INIT_SECTION */
2889 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2890 /* Subroutine called automatically by `main'.
2891 Compiling a global function named `main'
2892 produces an automatic call to this function at the beginning.
2894 For many systems, this routine calls __do_global_ctors.
2895 For systems which support a .init section we use the .init section
2896 to run __do_global_ctors, so we need not do anything here. */
  /* Support recursive calls to `main': run initializers just once.  */
  static int initialized;

      __do_global_ctors ();

#endif /* no HAS_INIT_SECTION or INVOKE__main */
2911 #endif /* L__main */
2912 #endif /* __CYGWIN__ */
2916 #include "gbl-ctors.h"
2918 /* Provide default definitions for the lists of constructors and
2919 destructors, so that we don't get linker errors. These symbols are
2920 intentionally bss symbols, so that gld and/or collect will provide
2921 the right values. */
2923 /* We declare the lists here with two elements each,
2924 so that they are valid empty lists if no other definition is loaded.
2926 If we are using the old "set" extensions to have the gnu linker
2927 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
2928 must be in the bss/common section.
2930 Long term no port should use those extensions. But many still do. */
#if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
#if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
func_ptr __CTOR_LIST__[2] = {0, 0};
func_ptr __DTOR_LIST__[2] = {0, 0};

func_ptr __CTOR_LIST__[2];
func_ptr __DTOR_LIST__[2];

#endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
#endif /* L_ctors */
2940 #endif /* L_ctors */
2944 #include "gbl-ctors.h"
static func_ptr *atexit_chain = 0;
static long atexit_chain_length = 0;
static volatile long last_atexit_chain_slot = -1;

atexit (func_ptr func)

  if (++last_atexit_chain_slot == atexit_chain_length)

      atexit_chain_length += 32;

        atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
                                             * sizeof (func_ptr));

        atexit_chain = (func_ptr *) malloc (atexit_chain_length
                                            * sizeof (func_ptr));

          atexit_chain_length = 0;
          last_atexit_chain_slot = -1;

  atexit_chain[last_atexit_chain_slot] = func;

extern void _cleanup ();
extern void _exit () __attribute__ ((noreturn));

  for ( ; last_atexit_chain_slot-- >= 0; )

      (*atexit_chain[last_atexit_chain_slot + 1]) ();
      atexit_chain[last_atexit_chain_slot + 1] = 0;

      free (atexit_chain);

/* Simple; we just need a wrapper for ON_EXIT.  */

atexit (func_ptr func)

  return ON_EXIT (func);

#endif /* ON_EXIT */
#endif /* NEED_ATEXIT */
/* Shared exception handling support routines.  */

extern void __default_terminate (void) __attribute__ ((__noreturn__));

__default_terminate ()

void (*__terminate_func)() = __default_terminate;

  (*__terminate_func)();

__throw_type_match (void *catch_type, void *throw_type, void *obj)

  printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
          catch_type, throw_type);

  if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)

/* Include definitions of EH context and table layout */

#include "eh-common.h"
#ifndef inhibit_libc

/* Allocate and return a new EH context structure.  */

extern void __throw ();

  struct eh_full_context
  {
    struct eh_context c;

  } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);

  memset (ehfc, 0, sizeof *ehfc);

  ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;

  /* This should optimize out entirely.  This should always be true,
     but just in case it ever isn't, don't allow bogus code to be
     generated.  */

  if ((void*)(&ehfc->c) != (void*)ehfc)
3095 static __gthread_key_t eh_context_key
;
3097 /* Destructor for struct eh_context. */
3099 eh_context_free (void *ptr
)
3101 __gthread_key_dtor (eh_context_key
, ptr
);
3107 /* Pointer to function to return EH context. */
3109 static struct eh_context
*eh_context_initialize ();
3110 static struct eh_context
*eh_context_static ();
3112 static struct eh_context
*eh_context_specific ();
3115 static struct eh_context
*(*get_eh_context
) () = &eh_context_initialize
;
3117 /* Routine to get EH context.
3118 This one will simply call the function pointer. */
3123 return (void *) (*get_eh_context
) ();
3126 /* Get and set the language specific info pointer. */
3131 struct eh_context
*eh
= (*get_eh_context
) ();
3135 #ifdef DWARF2_UNWIND_INFO
3136 static int dwarf_reg_size_table_initialized
= 0;
3137 static char dwarf_reg_size_table
[FIRST_PSEUDO_REGISTER
];
3140 init_reg_size_table ()
3142 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table
);
3143 dwarf_reg_size_table_initialized
= 1;
3149 eh_threads_initialize ()
3151 /* Try to create the key. If it fails, revert to static method,
3152 otherwise start using thread specific EH contexts. */
3153 if (__gthread_key_create (&eh_context_key
, &eh_context_free
) == 0)
3154 get_eh_context
= &eh_context_specific
;
3156 get_eh_context
= &eh_context_static
;
3158 #endif /* no __GTHREADS */
/* Initialize EH context.
   This will be called only once, since we change the GET_EH_CONTEXT
   pointer to another routine. */

static struct eh_context *
eh_context_initialize ()
{
#if __GTHREADS

  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  /* Make sure that get_eh_context does not point to us anymore.
     Some systems have dummy thread routines in their libc that
     return a success (Solaris 2.6 for example). */
  if (__gthread_once (&once, eh_threads_initialize) != 0
      || get_eh_context == &eh_context_initialize)
    {
      /* Use static version of EH context. */
      get_eh_context = &eh_context_static;
    }
#ifdef DWARF2_UNWIND_INFO
  {
    static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
    if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
        || ! dwarf_reg_size_table_initialized)
      init_reg_size_table ();
  }
#endif

#else /* no __GTHREADS */

  /* Use static version of EH context. */
  get_eh_context = &eh_context_static;

#ifdef DWARF2_UNWIND_INFO
  init_reg_size_table ();
#endif

#endif /* no __GTHREADS */

  return (*get_eh_context) ();
}
/* Return a static EH context. */

static struct eh_context *
eh_context_static ()
{
  static struct eh_context eh;
  static int initialized;
  static void *top_elt[2];

  if (! initialized)
    {
      initialized = 1;
      memset (&eh, 0, sizeof eh);
      eh.dynamic_handler_chain = top_elt;
    }
  return &eh;
}
#if __GTHREADS
/* Return a thread specific EH context. */

static struct eh_context *
eh_context_specific ()
{
  struct eh_context *eh;
  eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
  if (! eh)
    {
      eh = new_eh_context ();
      if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)
        __terminate ();
    }

  return eh;
}
#endif /* __GTHREADS */
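
/* Illustrative sketch (not part of libgcc): the self-replacing function
   pointer idiom used by get_eh_context above, reduced to a hypothetical
   stand-alone example.  The first call runs the initializer, which
   redirects the pointer so that every later call skips the setup work.  */
#if 0
static int get_value_init (void);
static int get_value_fast (void);

static int (*get_value) (void) = &get_value_init;

static int
get_value_fast (void)
{
  return 42;
}

static int
get_value_init (void)
{
  get_value = &get_value_fast;   /* one-time setup, as eh_context_initialize does */
  return (*get_value) ();
}
#endif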
/* Support routines for setjmp/longjmp exception handling.  */

/* Calls to __sjthrow are generated by the compiler when an exception
   is raised while using the setjmp/longjmp exception handling codegen
   method.  */

#ifdef DONT_USE_BUILTIN_SETJMP
extern void longjmp (void *, int);
#endif

/* Routine to get the head of the current thread's dynamic handler chain
   used for exception handling. */

void ***
__get_dynamic_handler_chain ()
{
  struct eh_context *eh = (*get_eh_context) ();
  return &eh->dynamic_handler_chain;
}
/* This is used to throw an exception when the setjmp/longjmp codegen
   method is used for exception handling.

   We call __terminate if there are no handlers left.  Otherwise we run the
   cleanup actions off the dynamic cleanup stack, pop the top of the
   dynamic handler chain, and use longjmp to transfer back to the associated
   handler.  */

extern void __sjthrow (void) __attribute__ ((__noreturn__));

void
__sjthrow ()
{
  struct eh_context *eh = (*get_eh_context) ();
  void ***dhc = &eh->dynamic_handler_chain;
  void *jmpbuf;
  void (*func)(void *, int);
  void *arg;
  void ***cleanup;

  /* The cleanup chain is one word into the buffer.  Get the cleanup
     chain.  */
  cleanup = (void***)&(*dhc)[1];

  /* If there are any cleanups in the chain, run them now.  */
  if (cleanup[0])
    {
      double store[200];
      void **buf = (void**)store;
      buf[1] = 0;
      buf[0] = (*dhc);

      /* try { */
#ifdef DONT_USE_BUILTIN_SETJMP
      if (! setjmp (&buf[2]))
#else
      if (! __builtin_setjmp (&buf[2]))
#endif
        {
          *dhc = buf;
          while (cleanup[0])
            {
              func = (void(*)(void*, int))cleanup[0][1];
              arg = (void*)cleanup[0][2];

              /* Update this before running the cleanup.  */
              cleanup[0] = (void **)cleanup[0][0];

              (*func)(arg, 2);
            }
          *dhc = buf[0];
        }
      /* catch (...) */
      else
        __terminate ();
    }

  /* We must call terminate if we try to rethrow an exception when
     there is no exception currently active or when there are no
     handlers left.  */
  if (! eh->info || (*dhc)[0] == 0)
    __terminate ();

  /* Find the jmpbuf associated with the top element of the dynamic
     handler chain.  The jmpbuf starts two words into the buffer.  */
  jmpbuf = &(*dhc)[2];

  /* Then we pop the top element off the dynamic handler chain.  */
  *dhc = (void**)(*dhc)[0];

  /* And then we jump to the handler.  */

#ifdef DONT_USE_BUILTIN_SETJMP
  longjmp (jmpbuf, 1);
#else
  __builtin_longjmp (jmpbuf, 1);
#endif
}
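
/* Illustrative sketch (not part of libgcc): the word layout __sjthrow and
   __sjpopnthrow assume for each element on the dynamic handler chain.
   Word 0 links to the enclosing handler's buffer, word 1 heads a list of
   (next, func, arg) cleanup records, and the setjmp buffer that the
   longjmp above lands in starts at word 2.  */
#if 0
struct handler_buffer_sketch
{
  void **outer_buffer;      /* (*dhc)[0]: previous element on the chain   */
  void **cleanup_records;   /* (*dhc)[1]: cleanup list walked before jump */
  void  *jmpbuf_words[1];   /* (*dhc)[2] onward: the setjmp buffer        */
};
#endif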
/* Run cleanups on the dynamic cleanup stack for the current dynamic
   handler, then pop the handler off the dynamic handler stack, and
   then throw.  This is used to skip the first handler and transfer
   control to the next handler in the dynamic handler stack.  */

extern void __sjpopnthrow (void) __attribute__ ((__noreturn__));

void
__sjpopnthrow ()
{
  struct eh_context *eh = (*get_eh_context) ();
  void ***dhc = &eh->dynamic_handler_chain;
  void (*func)(void *, int);
  void *arg;
  void ***cleanup;

  /* The cleanup chain is one word into the buffer.  Get the cleanup
     chain.  */
  cleanup = (void***)&(*dhc)[1];

  /* If there are any cleanups in the chain, run them now.  */
  if (cleanup[0])
    {
      double store[200];
      void **buf = (void**)store;
      buf[1] = 0;
      buf[0] = (*dhc);

      /* try { */
#ifdef DONT_USE_BUILTIN_SETJMP
      if (! setjmp (&buf[2]))
#else
      if (! __builtin_setjmp (&buf[2]))
#endif
        {
          *dhc = buf;
          while (cleanup[0])
            {
              func = (void(*)(void*, int))cleanup[0][1];
              arg = (void*)cleanup[0][2];

              /* Update this before running the cleanup.  */
              cleanup[0] = (void **)cleanup[0][0];

              (*func)(arg, 2);
            }
          *dhc = buf[0];
        }
      /* catch (...) */
      else
        __terminate ();
    }

  /* Then we pop the top element off the dynamic handler chain.  */
  *dhc = (void**)(*dhc)[0];

  __sjthrow ();
}
/* Support code for all exception region-based exception handling.  */

int
__eh_rtime_match (void *rtime)
{
  void *info;
  __eh_matcher matcher;
  void *ret;

  info = *(__get_eh_info ());
  matcher = ((__eh_info *)info)->match_function;
  if (! matcher)
    {
#ifndef inhibit_libc
      fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
#endif
      return 0;
    }
  ret = (*matcher) (info, rtime, (void *)0);
  return (ret != NULL);
}
/* This value identifies the place from which an exception is being
   thrown.  */

#ifdef EH_TABLE_LOOKUP

EH_TABLE_LOOKUP

#else

#ifdef DWARF2_UNWIND_INFO

/* Return the table version of an exception descriptor */
short
__get_eh_table_version (exception_descriptor *table)
{
  return table->lang.version;
}

/* Return the originating table language of an exception descriptor */
short
__get_eh_table_language (exception_descriptor *table)
{
  return table->lang.language;
}
/* This routine takes a PC and a pointer to the exception region TABLE for
   its translation unit, and returns the address of the exception handler
   associated with the closest exception table handler entry associated
   with that PC, or 0 if there are no table entries the PC fits in.

   In the event of a tie, we have to give the last entry, as it represents
   an inner block.  */

static void *
old_find_exception_handler (void *pc, old_exception_table *table)
{
  if (table)
    {
      int pos;
      int best = -1;

      /* We can't do a binary search because the table isn't guaranteed
         to be sorted from function to function.  */
      for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
        {
          if (table[pos].start_region <= pc && table[pos].end_region > pc)
            {
              /* This can apply.  Make sure it is at least as small as
                 the previous best.  */
              if (best == -1 || (table[pos].end_region <= table[best].end_region
                        && table[pos].start_region >= table[best].start_region))
                best = pos;
            }
          /* But it is sorted by starting PC within a function.  */
          else if (best >= 0 && table[pos].start_region > pc)
            break;
        }
      if (best != -1)
        return table[best].exception_handler;
    }

  return (void *) 0;
}
/* find_exception_handler finds the correct handler, if there is one, to
   handle an exception.  It returns a pointer to the handler to which
   control should be transferred, or NULL if there is nothing left.

   PC - pc where the exception originates.  If this is a rethrow,
        then this starts out as a pointer to the exception table
        entry we wish to rethrow out of.
   TABLE - exception table for the current module.
   EH_INFO - eh info pointer for this exception.
   RETHROW - 1 if this is a rethrow. (see incoming value of PC).
   CLEANUP - returned flag indicating whether this is a cleanup handler.  */

static void *
find_exception_handler (void *pc, exception_descriptor *table,
                        __eh_info *eh_info, int rethrow, int *cleanup)
{
  void *retval = NULL;
  *cleanup = 1;
  if (table)
    {
      int pos = 0;
      /* The new model assumes the table is sorted inner-most out, so the
         first region we find which matches is the correct one.  */

      exception_table *tab = &(table->table[0]);

      /* Subtract 1 from the PC to avoid hitting the next region */
      if (rethrow)
        {
          /* pc is actually the region table entry to rethrow out of */
          pos = ((exception_table *) pc) - tab;
          pc = ((exception_table *) pc)->end_region - 1;

          /* The label is always on the LAST handler entry for a region,
             so we know the next entry is a different region, even if the
             addresses are the same.  Make sure it's not the end of the
             table, though.  */
          if (tab[pos].start_region != (void *) -1)
            pos++;
        }
      else
        pc--;

      /* We can't do a binary search because the table is in inner-most
         to outermost address ranges within functions.  */
      for ( ; tab[pos].start_region != (void *) -1; pos++)
        {
          if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
            {
              if (tab[pos].match_info)
                {
                  __eh_matcher matcher = eh_info->match_function;
                  /* match info but no matcher is NOT a match */
                  if (matcher)
                    {
                      void *ret = (*matcher)((void *) eh_info,
                                             tab[pos].match_info, table);
                      if (ret)
                        {
                          if (retval == NULL)
                            retval = tab[pos].exception_handler;
                          *cleanup = 0;
                          break;
                        }
                    }
                }
              else
                {
                  if (retval == NULL)
                    retval = tab[pos].exception_handler;
                }
            }
        }
    }
  return retval;
}
#endif /* DWARF2_UNWIND_INFO */
#endif /* EH_TABLE_LOOKUP */
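
/* Illustrative sketch (not part of libgcc): the range search performed by
   find_exception_handler above, reduced to a hypothetical table of
   [start, end) address ranges sorted inner-most first.  The first range
   containing PC wins, which is why nested regions must precede the regions
   that enclose them.  */
#if 0
struct region_sketch { unsigned long start, end, handler; };

static unsigned long
find_region_sketch (unsigned long pc, const struct region_sketch *tab)
{
  int pos;

  /* A start address of -1 terminates the table, as in the real layout.  */
  for (pos = 0; tab[pos].start != (unsigned long) -1; pos++)
    if (tab[pos].start <= pc && tab[pos].end > pc)
      return tab[pos].handler;
  return 0;
}
#endif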
#ifdef DWARF2_UNWIND_INFO
/* Support code for exception handling using static unwind information.  */

#include "frame.h"

/* This type is used in get_reg and put_reg to deal with ABIs where a void*
   is smaller than a word, such as the Irix 6 n32 ABI.  We cast twice to
   avoid a warning about casting between int and pointer of different
   sizes.  */

typedef int ptr_type __attribute__ ((mode (pointer)));
#ifdef INCOMING_REGNO
/* Is the saved value for register REG in frame UDATA stored in a register
   window in the previous frame?  */

/* ??? The Sparc INCOMING_REGNO references TARGET_FLAT.  This allows us
   to use the macro here.  One wonders, though, that perhaps TARGET_FLAT
   compiled functions won't work with the frame-unwind stuff here.
   Perhaps the entirety of in_reg_window should be conditional on having
   seen a DW_CFA_GNU_window_save?  */
#define target_flags 0

static int
in_reg_window (int reg, frame_state *udata)
{
  if (udata->saved[reg] == REG_SAVED_REG)
    return INCOMING_REGNO (reg) == reg;
  if (udata->saved[reg] != REG_SAVED_OFFSET)
    return 0;

#ifdef STACK_GROWS_DOWNWARD
  return udata->reg_or_offset[reg] > 0;
#else
  return udata->reg_or_offset[reg] < 0;
#endif
}
#else
static inline int in_reg_window (int reg, frame_state *udata) { return 0; }
#endif /* INCOMING_REGNO */
/* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
   frame called by UDATA or 0.  */

static word_type *
get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
{
  while (udata->saved[reg] == REG_SAVED_REG)
    {
      reg = udata->reg_or_offset[reg];
      if (in_reg_window (reg, udata))
        {
          udata = sub_udata;
          sub_udata = NULL;
        }
    }
  if (udata->saved[reg] == REG_SAVED_OFFSET)
    return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);
  else
    abort ();
}
/* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
   frame called by UDATA or 0.  */

static inline void *
get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
{
  return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
}
/* Overwrite the saved value for register REG in frame UDATA with VAL.  */

static inline void
put_reg (unsigned reg, void *val, frame_state *udata)
{
  *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
}
/* Copy the saved value for register REG from frame UDATA to frame
   TARGET_UDATA.  Unlike the previous two functions, this can handle
   registers that are not one word large.  */

static void
copy_reg (unsigned reg, frame_state *udata, frame_state *target_udata)
{
  word_type *preg = get_reg_addr (reg, udata, NULL);
  word_type *ptreg = get_reg_addr (reg, target_udata, NULL);

  memcpy (ptreg, preg, dwarf_reg_size_table[reg]);
}
/* Retrieve the return address for frame UDATA.  */

static inline void *
get_return_addr (frame_state *udata, frame_state *sub_udata)
{
  return __builtin_extract_return_addr
    (get_reg (udata->retaddr_column, udata, sub_udata));
}
/* Overwrite the return address for frame UDATA with VAL.  */

static inline void
put_return_addr (void *val, frame_state *udata)
{
  val = __builtin_frob_return_addr (val);
  put_reg (udata->retaddr_column, val, udata);
}
/* Given the current frame UDATA and its return address PC, return the
   information about the calling frame in CALLER_UDATA.  */

static frame_state *
next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata)
{
  caller_udata = __frame_state_for (pc, caller_udata);
  if (! caller_udata)
    return 0;

  /* Now go back to our caller's stack frame.  If our caller's CFA register
     was saved in our stack frame, restore it; otherwise, assume the CFA
     register is SP and restore it to our CFA value.  */
  if (udata->saved[caller_udata->cfa_reg])
    caller_udata->cfa = get_reg (caller_udata->cfa_reg, udata, 0);
  else
    caller_udata->cfa = udata->cfa;
  caller_udata->cfa += caller_udata->cfa_offset;

  return caller_udata;
}
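
/* Worked example (an assumption for illustration, not from the sources):
   with a downward-growing stack, if the caller's CFA register was not
   saved in the current frame, next_stack_level takes the current frame's
   CFA as the caller's stack pointer at the call site and then adds the
   caller's cfa_offset.  A current CFA of 0x1000 and a caller cfa_offset
   of 0x30 therefore yield a caller CFA of 0x1030.  */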
/* Hook to call before __terminate if only cleanup handlers remain. */
void
__unwinding_cleanup ()
{
}

/* throw_helper performs some of the common grunt work for a throw.  This
   routine is called by throw and rethrows.  This is pretty much split
   out from the old __throw routine.  An addition has been made which allows
   for a dummy call to a routine __unwinding_cleanup() when nothing but
   cleanups remain.  This allows a debugger to examine the state at which
   the throw was executed, before any cleanups, rather than at the
   terminate point after the stack has been unwound.

   EH is the current eh_context structure.
   PC is the address of the call to __throw.
   MY_UDATA is the unwind information for __throw.
   OFFSET_P is where we return the SP adjustment offset.  */
static void *
throw_helper (eh, pc, my_udata, offset_p)
     struct eh_context *eh;
     void *pc;
     frame_state *my_udata;
     long *offset_p;
{
  frame_state ustruct2, *udata = &ustruct2;
  frame_state ustruct;
  frame_state *sub_udata = &ustruct;
  void *saved_pc = pc;
  void *handler;
  void *handler_p = 0;
  void *pc_p = 0;
  frame_state saved_ustruct;
  int new_eh_model;
  int cleanup = 0;
  int only_cleanup = 0;
  int rethrow = 0;
  int saved_state = 0;
  long args_size;
  __eh_info *eh_info = (__eh_info *)eh->info;

  /* Do we find a handler based on a re-throw PC? */
  if (eh->table_index != (void *) 0)
    rethrow = 1;

  memcpy (udata, my_udata, sizeof (*udata));

  handler = (void *) 0;
  for (;;)
    {
      frame_state *p = udata;
      udata = next_stack_level (pc, udata, sub_udata);
      sub_udata = p;

      /* If we couldn't find the next frame, we lose.  */
      if (! udata)
        break;

      if (udata->eh_ptr == NULL)
        new_eh_model = 0;
      else
        new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
                                runtime_id_field == NEW_EH_RUNTIME);

      if (rethrow)
        {
          rethrow = 0;
          handler = find_exception_handler (eh->table_index, udata->eh_ptr,
                                            eh_info, 1, &cleanup);
          eh->table_index = (void *)0;
        }
      else
        if (new_eh_model)
          handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
                                            0, &cleanup);
        else
          handler = old_find_exception_handler (pc, udata->eh_ptr);

      /* If we found one, we can stop searching, if it's not a cleanup.
         For cleanups, we save the state and keep looking.  This allows
         us to call a debug hook if nothing but cleanups are left.  */
      if (handler)
        {
          if (cleanup)
            {
              if (!saved_state)
                {
                  saved_ustruct = *udata;
                  handler_p = handler;
                  pc_p = pc;
                  saved_state = 1;
                  only_cleanup = 1;
                }
            }
          else
            {
              only_cleanup = 0;
              break;
            }
        }

      /* Otherwise, we continue searching.  We subtract 1 from PC to avoid
         hitting the beginning of the next region.  */
      pc = get_return_addr (udata, sub_udata) - 1;
    }

  if (saved_state)
    {
      udata = &saved_ustruct;
      handler = handler_p;
      pc = pc_p;
      if (only_cleanup)
        __unwinding_cleanup ();
    }
  /* If we haven't found a handler by now, this is an unhandled
     exception.  */
  if (! handler)
    __terminate ();

  eh->handler_label = handler;

  args_size = udata->args_size;
  if (pc == saved_pc)
    /* We found a handler in the throw context, no need to unwind.  */
    udata = my_udata;
  else
    {
      int i;

      /* Unwind all the frames between this one and the handler by copying
         their saved register values into our register save slots.  */

      /* Remember the PC where we found the handler.  */
      void *handler_pc = pc;

      /* Start from the throw context again.  */
      pc = saved_pc;
      memcpy (udata, my_udata, sizeof (*udata));

      while (pc != handler_pc)
        {
          frame_state *p = udata;
          udata = next_stack_level (pc, udata, sub_udata);
          sub_udata = p;

          for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
            if (i != udata->retaddr_column && udata->saved[i])
              {
                /* If you modify the saved value of the return address
                   register on the SPARC, you modify the return address for
                   your caller's frame.  Don't do that here, as it will
                   confuse get_return_addr.  */
                if (in_reg_window (i, udata)
                    && udata->saved[udata->retaddr_column] == REG_SAVED_REG
                    && udata->reg_or_offset[udata->retaddr_column] == i)
                  continue;
                copy_reg (i, udata, my_udata);
              }

          pc = get_return_addr (udata, sub_udata) - 1;
        }
      /* But we do need to update the saved return address register from
         the last frame we unwind, or the handler frame will have the wrong
         return address.  */
      if (udata->saved[udata->retaddr_column] == REG_SAVED_REG)
        {
          i = udata->reg_or_offset[udata->retaddr_column];
          if (in_reg_window (i, udata))
            copy_reg (i, udata, my_udata);
        }
    }
  /* udata now refers to the frame called by the handler frame.  */

  /* We adjust SP by the difference between __throw's CFA and the CFA for
     the frame called by the handler frame, because those CFAs correspond
     to the SP values at the two call sites.  We need to further adjust by
     the args_size of the handler frame itself to get the handler frame's
     SP from before the args were pushed for that call.  */
#ifdef STACK_GROWS_DOWNWARD
  *offset_p = udata->cfa - my_udata->cfa + args_size;
#else
  *offset_p = my_udata->cfa - udata->cfa - args_size;
#endif

  return handler;
}
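
/* Worked example (an assumption for illustration, not from the sources):
   on a downward-growing stack, suppose __throw's CFA (my_udata->cfa) is
   0x1000, the CFA of the frame called by the handler frame (udata->cfa)
   is 0x1400, and the handler frame pushed 16 bytes of arguments for that
   call.  The offset handed back through OFFSET_P is then
   0x1400 - 0x1000 + 16 = 0x410, the amount SP must be raised so execution
   resumes with the handler frame's stack pointer.  */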
/* We first search for an exception handler, and if we don't find
   it, we call __terminate on the current stack frame so that we may
   use the debugger to walk the stack and understand why no handler
   was found.

   If we find one, then we unwind the frames down to the one that
   has the handler and transfer control into the handler.  */

/*extern void __throw(void) __attribute__ ((__noreturn__));*/

void
__throw ()
{
  struct eh_context *eh = (*get_eh_context) ();
  void *pc, *handler;
  long offset;

  /* XXX maybe make my_ustruct static so we don't have to look it up for
     each throw.  */
  frame_state my_ustruct, *my_udata = &my_ustruct;

  /* This is required for C++ semantics.  We must call terminate if we
     try to rethrow an exception when there is no exception currently
     active.  */
  if (! eh->info)
    __terminate ();

  /* Start at our stack frame.  */
label:
  my_udata = __frame_state_for (&&label, my_udata);
  if (! my_udata)
    __terminate ();

  /* We need to get the value from the CFA register. */
  my_udata->cfa = __builtin_dwarf_cfa ();

  /* Do any necessary initialization to access arbitrary stack frames.
     On the SPARC, this means flushing the register windows.  */
  __builtin_unwind_init ();

  /* Now reset pc to the right throw point.  */
  pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;

  handler = throw_helper (eh, pc, my_udata, &offset);

  /* Now go!  */

  __builtin_eh_return ((void *)eh, offset, handler);

  /* Epilogue: restore the handler frame's register values and return
     to the stub.  */
}
/*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/

void
__rethrow (index)
     void *index;
{
  struct eh_context *eh = (*get_eh_context) ();
  void *pc, *handler;
  long offset;

  /* XXX maybe make my_ustruct static so we don't have to look it up for
     each throw.  */
  frame_state my_ustruct, *my_udata = &my_ustruct;

  /* This is required for C++ semantics.  We must call terminate if we
     try to rethrow an exception when there is no exception currently
     active.  */
  if (! eh->info)
    __terminate ();

  /* This is the table index we want to rethrow from.  The value of
     the END_REGION label is used for the PC of the throw, and the
     search begins with the next table entry. */
  eh->table_index = index;

  /* Start at our stack frame.  */
label:
  my_udata = __frame_state_for (&&label, my_udata);
  if (! my_udata)
    __terminate ();

  /* We need to get the value from the CFA register. */
  my_udata->cfa = __builtin_dwarf_cfa ();

  /* Do any necessary initialization to access arbitrary stack frames.
     On the SPARC, this means flushing the register windows.  */
  __builtin_unwind_init ();

  /* Now reset pc to the right throw point.  */
  pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;

  handler = throw_helper (eh, pc, my_udata, &offset);

  /* Now go!  */

  __builtin_eh_return ((void *)eh, offset, handler);

  /* Epilogue: restore the handler frame's register values and return
     to the stub.  */
}
#endif /* DWARF2_UNWIND_INFO */
#ifndef inhibit_libc
/* This gets us __GNU_LIBRARY__.  */
#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch.  */
#include <stdio.h>

#ifdef __GNU_LIBRARY__
  /* Avoid forcing the library's meaning of `write' on the user program
     by using the "internal" name (for use within the library)  */
#define write(fd, buf, n)	__write((fd), (buf), (n))
#endif
#endif /* inhibit_libc */
#define MESSAGE "pure virtual method called\n"

extern void __terminate (void) __attribute__ ((__noreturn__));

void
__pure_virtual ()
{
#ifndef inhibit_libc
  write (2, MESSAGE, sizeof (MESSAGE) - 1);
#endif
  __terminate ();
}
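
/* Illustrative sketch (not part of libgcc): the C++ front end of this era
   points vtable slots of abstract classes at __pure_virtual, so calling a
   pure virtual function (for example from a base-class constructor) ends
   up here.  A hypothetical C analogy using a function-pointer "vtable":  */
#if 0
struct vtable_sketch { void (*method) (void); };

/* An "abstract" object whose slot was never overridden.  */
static struct vtable_sketch abstract_vtable = { __pure_virtual };

/* Calling abstract_vtable.method () writes MESSAGE to file descriptor 2
   and then calls __terminate ().  */
#endif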