1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001 Free Software Foundation, Inc.
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
22 GNU CC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with GNU CC; see the file COPYING. If not, write to
29 the Free Software Foundation, 59 Temple Place - Suite 330,
30 Boston, MA 02111-1307, USA. */
32 /* It is incorrect to include config.h here, because this file is being
33 compiled for the target, and hence definitions concerning only the host
41 /* Don't use `fancy_abort' here even if config.h says to use it. */
48 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
49 #if defined (L_divdi3) || defined (L_moddi3)
61 w
.s
.high
= -uu
.s
.high
- ((UWtype
) w
.s
.low
> 0);
69 __addvsi3 (Wtype a
, Wtype b
)
75 if (b
>= 0 ? w
< a
: w
> a
)
84 __addvdi3 (DWtype a
, DWtype b
)
90 if (b
>= 0 ? w
< a
: w
> a
)
99 __subvsi3 (Wtype a
, Wtype b
)
102 return __addvsi3 (a
, (-b
));
108 if (b
>= 0 ? w
> a
: w
< a
)
118 __subvdi3 (DWtype a
, DWtype b
)
127 if (b
>= 0 ? w
> a
: w
< a
)
137 __mulvsi3 (Wtype a
, Wtype b
)
143 if (((a
>= 0) == (b
>= 0)) ? w
< 0 : w
> 0)
158 if (a
>= 0 ? w
> 0 : w
< 0)
173 if (a
>= 0 ? w
> 0 : w
< 0)
222 __mulvdi3 (DWtype u
, DWtype v
)
228 if (((u
>= 0) == (v
>= 0)) ? w
< 0 : w
> 0)
236 /* Unless shift functions are defined with full ANSI prototypes,
237 parameter b will be promoted to int if word_type is smaller than an int. */
240 __lshrdi3 (DWtype u
, word_type b
)
251 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
255 w
.s
.low
= (UWtype
) uu
.s
.high
>> -bm
;
259 UWtype carries
= (UWtype
) uu
.s
.high
<< bm
;
261 w
.s
.high
= (UWtype
) uu
.s
.high
>> b
;
262 w
.s
.low
= ((UWtype
) uu
.s
.low
>> b
) | carries
;
271 __ashldi3 (DWtype u
, word_type b
)
282 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
286 w
.s
.high
= (UWtype
) uu
.s
.low
<< -bm
;
290 UWtype carries
= (UWtype
) uu
.s
.low
>> bm
;
292 w
.s
.low
= (UWtype
) uu
.s
.low
<< b
;
293 w
.s
.high
= ((UWtype
) uu
.s
.high
<< b
) | carries
;
302 __ashrdi3 (DWtype u
, word_type b
)
313 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
316 /* w.s.high = 1..1 or 0..0 */
317 w
.s
.high
= uu
.s
.high
>> (sizeof (Wtype
) * BITS_PER_UNIT
- 1);
318 w
.s
.low
= uu
.s
.high
>> -bm
;
322 UWtype carries
= (UWtype
) uu
.s
.high
<< bm
;
324 w
.s
.high
= uu
.s
.high
>> b
;
325 w
.s
.low
= ((UWtype
) uu
.s
.low
>> b
) | carries
;
337 UWtype word
, count
, add
;
341 word
= uu
.s
.low
, add
= 0;
342 else if (uu
.s
.high
!= 0)
343 word
= uu
.s
.high
, add
= BITS_PER_UNIT
* sizeof (Wtype
);
347 count_trailing_zeros (count
, word
);
348 return count
+ add
+ 1;
354 __muldi3 (DWtype u
, DWtype v
)
362 w
.ll
= __umulsidi3 (uu
.s
.low
, vv
.s
.low
);
363 w
.s
.high
+= ((UWtype
) uu
.s
.low
* (UWtype
) vv
.s
.high
364 + (UWtype
) uu
.s
.high
* (UWtype
) vv
.s
.low
);
371 #if defined (sdiv_qrnnd)
373 __udiv_w_sdiv (UWtype
*rp
, UWtype a1
, UWtype a0
, UWtype d
)
380 if (a1
< d
- a1
- (a0
>> (W_TYPE_SIZE
- 1)))
382 /* dividend, divisor, and quotient are nonnegative */
383 sdiv_qrnnd (q
, r
, a1
, a0
, d
);
387 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
388 sub_ddmmss (c1
, c0
, a1
, a0
, d
>> 1, d
<< (W_TYPE_SIZE
- 1));
389 /* Divide (c1*2^32 + c0) by d */
390 sdiv_qrnnd (q
, r
, c1
, c0
, d
);
391 /* Add 2^31 to quotient */
392 q
+= (UWtype
) 1 << (W_TYPE_SIZE
- 1);
397 b1
= d
>> 1; /* d/2, between 2^30 and 2^31 - 1 */
398 c1
= a1
>> 1; /* A/2 */
399 c0
= (a1
<< (W_TYPE_SIZE
- 1)) + (a0
>> 1);
401 if (a1
< b1
) /* A < 2^32*b1, so A/2 < 2^31*b1 */
403 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
405 r
= 2*r
+ (a0
& 1); /* Remainder from A/(2*b1) */
422 else if (c1
< b1
) /* So 2^31 <= (A/2)/b1 < 2^32 */
425 c0
= ~c0
; /* logical NOT */
427 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
429 q
= ~q
; /* (A/2)/b1 */
432 r
= 2*r
+ (a0
& 1); /* A/(2*b1) */
450 else /* Implies c1 = b1 */
451 { /* Hence a1 = d - 1 = 2*b1 - 1 */
469 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
471 __udiv_w_sdiv (UWtype
*rp
__attribute__ ((__unused__
)),
472 UWtype a1
__attribute__ ((__unused__
)),
473 UWtype a0
__attribute__ ((__unused__
)),
474 UWtype d
__attribute__ ((__unused__
)))
481 #if (defined (L_udivdi3) || defined (L_divdi3) || \
482 defined (L_umoddi3) || defined (L_moddi3))
487 const UQItype __clz_tab
[] =
489 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
490 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
491 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
492 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
493 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
494 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
495 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
496 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
502 #if (defined (L_udivdi3) || defined (L_divdi3) || \
503 defined (L_umoddi3) || defined (L_moddi3))
507 __udivmoddi4 (UDWtype n
, UDWtype d
, UDWtype
*rp
)
512 UWtype d0
, d1
, n0
, n1
, n2
;
524 #if !UDIV_NEEDS_NORMALIZATION
531 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
534 /* Remainder in n0. */
541 d0
= 1 / d0
; /* Divide intentionally by zero. */
543 udiv_qrnnd (q1
, n1
, 0, n1
, d0
);
544 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
546 /* Remainder in n0. */
557 #else /* UDIV_NEEDS_NORMALIZATION */
565 count_leading_zeros (bm
, d0
);
569 /* Normalize, i.e. make the most significant bit of the
573 n1
= (n1
<< bm
) | (n0
>> (W_TYPE_SIZE
- bm
));
577 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
580 /* Remainder in n0 >> bm. */
587 d0
= 1 / d0
; /* Divide intentionally by zero. */
589 count_leading_zeros (bm
, d0
);
593 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
594 conclude (the most significant bit of n1 is set) /\ (the
595 leading quotient digit q1 = 1).
597 This special case is necessary, not an optimization.
598 (Shifts counts of W_TYPE_SIZE are undefined.) */
607 b
= W_TYPE_SIZE
- bm
;
611 n1
= (n1
<< bm
) | (n0
>> b
);
614 udiv_qrnnd (q1
, n1
, n2
, n1
, d0
);
619 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
621 /* Remainder in n0 >> bm. */
631 #endif /* UDIV_NEEDS_NORMALIZATION */
642 /* Remainder in n1n0. */
654 count_leading_zeros (bm
, d1
);
657 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
658 conclude (the most significant bit of n1 is set) /\ (the
659 quotient digit q0 = 0 or 1).
661 This special case is necessary, not an optimization. */
663 /* The condition on the next line takes advantage of that
664 n1 >= d1 (true due to program flow). */
665 if (n1
> d1
|| n0
>= d0
)
668 sub_ddmmss (n1
, n0
, n1
, n0
, d1
, d0
);
687 b
= W_TYPE_SIZE
- bm
;
689 d1
= (d1
<< bm
) | (d0
>> b
);
692 n1
= (n1
<< bm
) | (n0
>> b
);
695 udiv_qrnnd (q0
, n1
, n2
, n1
, d1
);
696 umul_ppmm (m1
, m0
, q0
, d0
);
698 if (m1
> n1
|| (m1
== n1
&& m0
> n0
))
701 sub_ddmmss (m1
, m0
, m1
, m0
, d1
, d0
);
706 /* Remainder in (n1n0 - m1m0) >> bm. */
709 sub_ddmmss (n1
, n0
, n1
, n0
, m1
, m0
);
710 rr
.s
.low
= (n1
<< b
) | (n0
>> bm
);
711 rr
.s
.high
= n1
>> bm
;
726 __divdi3 (DWtype u
, DWtype v
)
737 uu
.ll
= __negdi2 (uu
.ll
);
740 vv
.ll
= __negdi2 (vv
.ll
);
742 w
= __udivmoddi4 (uu
.ll
, vv
.ll
, (UDWtype
*) 0);
752 __moddi3 (DWtype u
, DWtype v
)
763 uu
.ll
= __negdi2 (uu
.ll
);
765 vv
.ll
= __negdi2 (vv
.ll
);
767 (void) __udivmoddi4 (uu
.ll
, vv
.ll
, &w
);
777 __umoddi3 (UDWtype u
, UDWtype v
)
781 (void) __udivmoddi4 (u
, v
, &w
);
789 __udivdi3 (UDWtype n
, UDWtype d
)
791 return __udivmoddi4 (n
, d
, (UDWtype
*) 0);
797 __cmpdi2 (DWtype a
, DWtype b
)
801 au
.ll
= a
, bu
.ll
= b
;
803 if (au
.s
.high
< bu
.s
.high
)
805 else if (au
.s
.high
> bu
.s
.high
)
807 if ((UWtype
) au
.s
.low
< (UWtype
) bu
.s
.low
)
809 else if ((UWtype
) au
.s
.low
> (UWtype
) bu
.s
.low
)
817 __ucmpdi2 (DWtype a
, DWtype b
)
821 au
.ll
= a
, bu
.ll
= b
;
823 if ((UWtype
) au
.s
.high
< (UWtype
) bu
.s
.high
)
825 else if ((UWtype
) au
.s
.high
> (UWtype
) bu
.s
.high
)
827 if ((UWtype
) au
.s
.low
< (UWtype
) bu
.s
.low
)
829 else if ((UWtype
) au
.s
.low
> (UWtype
) bu
.s
.low
)
835 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
836 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
837 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
840 __fixunstfDI (TFtype a
)
848 /* Compute high word of result, as a flonum. */
849 b
= (a
/ HIGH_WORD_COEFF
);
850 /* Convert that to fixed (but not to DWtype!),
851 and shift it into the high word. */
854 /* Remove high part from the TFtype, leaving the low part as flonum. */
856 /* Convert that to fixed (but not to DWtype!) and add it in.
857 Sometimes A comes out negative. This is significant, since
858 A has more bits than a long int does. */
867 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
872 return - __fixunstfDI (-a
);
873 return __fixunstfDI (a
);
877 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
878 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
879 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
882 __fixunsxfDI (XFtype a
)
890 /* Compute high word of result, as a flonum. */
891 b
= (a
/ HIGH_WORD_COEFF
);
892 /* Convert that to fixed (but not to DWtype!),
893 and shift it into the high word. */
896 /* Remove high part from the XFtype, leaving the low part as flonum. */
898 /* Convert that to fixed (but not to DWtype!) and add it in.
899 Sometimes A comes out negative. This is significant, since
900 A has more bits than a long int does. */
909 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
914 return - __fixunsxfDI (-a
);
915 return __fixunsxfDI (a
);
920 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
921 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
924 __fixunsdfDI (DFtype a
)
932 /* Compute high word of result, as a flonum. */
933 b
= (a
/ HIGH_WORD_COEFF
);
934 /* Convert that to fixed (but not to DWtype!),
935 and shift it into the high word. */
938 /* Remove high part from the DFtype, leaving the low part as flonum. */
940 /* Convert that to fixed (but not to DWtype!) and add it in.
941 Sometimes A comes out negative. This is significant, since
942 A has more bits than a long int does. */
956 return - __fixunsdfDI (-a
);
957 return __fixunsdfDI (a
);
962 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
963 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
966 __fixunssfDI (SFtype original_a
)
968 /* Convert the SFtype to a DFtype, because that is surely not going
969 to lose any bits. Some day someone else can write a faster version
970 that avoids converting to DFtype, and verify it really works right. */
971 DFtype a
= original_a
;
978 /* Compute high word of result, as a flonum. */
979 b
= (a
/ HIGH_WORD_COEFF
);
980 /* Convert that to fixed (but not to DWtype!),
981 and shift it into the high word. */
984 /* Remove high part from the DFtype, leaving the low part as flonum. */
986 /* Convert that to fixed (but not to DWtype!) and add it in.
987 Sometimes A comes out negative. This is significant, since
988 A has more bits than a long int does. */
1002 return - __fixunssfDI (-a
);
1003 return __fixunssfDI (a
);
1007 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
1008 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1009 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1010 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1013 __floatdixf (DWtype u
)
1017 d
= (Wtype
) (u
>> WORD_SIZE
);
1018 d
*= HIGH_HALFWORD_COEFF
;
1019 d
*= HIGH_HALFWORD_COEFF
;
1020 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
1026 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
1027 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1028 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1029 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1032 __floatditf (DWtype u
)
1036 d
= (Wtype
) (u
>> WORD_SIZE
);
1037 d
*= HIGH_HALFWORD_COEFF
;
1038 d
*= HIGH_HALFWORD_COEFF
;
1039 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
1046 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1047 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1048 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1051 __floatdidf (DWtype u
)
1055 d
= (Wtype
) (u
>> WORD_SIZE
);
1056 d
*= HIGH_HALFWORD_COEFF
;
1057 d
*= HIGH_HALFWORD_COEFF
;
1058 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
1065 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1066 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1067 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1068 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
1070 /* Define codes for all the float formats that we know of. Note
1071 that this is copied from real.h. */
1073 #define UNKNOWN_FLOAT_FORMAT 0
1074 #define IEEE_FLOAT_FORMAT 1
1075 #define VAX_FLOAT_FORMAT 2
1076 #define IBM_FLOAT_FORMAT 3
1078 /* Default to IEEE float if not specified. Nearly all machines use it. */
1079 #ifndef HOST_FLOAT_FORMAT
1080 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1083 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1088 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1093 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1099 __floatdisf (DWtype u
)
1101 /* Do the calculation in DFmode
1102 so that we don't lose any of the precision of the high word
1103 while multiplying it. */
1106 /* Protect against double-rounding error.
1107 Represent any low-order bits, that might be truncated in DFmode,
1108 by a bit that won't be lost. The bit can go in anywhere below the
1109 rounding position of the SFmode. A fixed mask and bit position
1110 handles all usual configurations. It doesn't handle the case
1111 of 128-bit DImode, however. */
1112 if (DF_SIZE
< DI_SIZE
1113 && DF_SIZE
> (DI_SIZE
- DF_SIZE
+ SF_SIZE
))
1115 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
1116 if (! (- ((DWtype
) 1 << DF_SIZE
) < u
1117 && u
< ((DWtype
) 1 << DF_SIZE
)))
1119 if ((UDWtype
) u
& (REP_BIT
- 1))
1123 f
= (Wtype
) (u
>> WORD_SIZE
);
1124 f
*= HIGH_HALFWORD_COEFF
;
1125 f
*= HIGH_HALFWORD_COEFF
;
1126 f
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
1132 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
1133 /* Reenable the normal types, in case limits.h needs them. */
1146 __fixunsxfSI (XFtype a
)
1148 if (a
>= - (DFtype
) Wtype_MIN
)
1149 return (Wtype
) (a
+ Wtype_MIN
) - Wtype_MIN
;
1155 /* Reenable the normal types, in case limits.h needs them. */
1168 __fixunsdfSI (DFtype a
)
1170 if (a
>= - (DFtype
) Wtype_MIN
)
1171 return (Wtype
) (a
+ Wtype_MIN
) - Wtype_MIN
;
1177 /* Reenable the normal types, in case limits.h needs them. */
1190 __fixunssfSI (SFtype a
)
1192 if (a
>= - (SFtype
) Wtype_MIN
)
1193 return (Wtype
) (a
+ Wtype_MIN
) - Wtype_MIN
;
1198 /* From here on down, the routines use normal data types. */
1200 #define SItype bogus_type
1201 #define USItype bogus_type
1202 #define DItype bogus_type
1203 #define UDItype bogus_type
1204 #define SFtype bogus_type
1205 #define DFtype bogus_type
1223 /* Like bcmp except the sign is meaningful.
1224 Result is negative if S1 is less than S2,
1225 positive if S1 is greater, 0 if S1 and S2 are equal. */
1228 __gcc_bcmp (const unsigned char *s1
, const unsigned char *s2
, size_t size
)
1232 unsigned char c1
= *s1
++, c2
= *s2
++;
1243 #ifndef inhibit_libc
1245 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1247 /* This is used by the `assert' macro. */
1249 __eprintf (const char *string
, const char *expression
,
1250 unsigned int line
, const char *filename
)
1252 fprintf (stderr
, string
, expression
, line
, filename
);
1262 /* Structure emitted by -a */
1266 const char *filename
;
1270 const unsigned long *addresses
;
1272 /* Older GCC's did not emit these fields. */
1274 const char **functions
;
1275 const long *line_nums
;
1276 const char **filenames
;
1280 #ifdef BLOCK_PROFILER_CODE
1283 #ifndef inhibit_libc
1285 /* Simple minded basic block profiling output dumper for
1286 systems that don't provide tcov support. At present,
1287 it requires atexit and stdio. */
1289 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1291 char *ctime
PARAMS ((const time_t *));
1293 #include "gbl-ctors.h"
1294 #include "gcov-io.h"
1296 #ifdef TARGET_HAS_F_SETLKW
1301 static struct bb
*bb_head
;
1303 static int num_digits (long value
, int base
) __attribute__ ((const));
1305 /* Return the number of digits needed to print a value */
1306 /* __inline__ */ static int num_digits (long value
, int base
)
1308 int minus
= (value
< 0 && base
!= 16);
1309 unsigned long v
= (minus
) ? -value
: value
;
1323 __bb_exit_func (void)
1325 FILE *da_file
, *file
;
1332 i
= strlen (bb_head
->filename
) - 3;
1334 if (!strcmp (bb_head
->filename
+i
, ".da"))
1336 /* Must be -fprofile-arcs not -a.
1337 Dump data in a form that gcov expects. */
1341 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1345 /* Make sure the output file exists -
1346 but don't clobber existing data. */
1347 if ((da_file
= fopen (ptr
->filename
, "a")) != 0)
1350 /* Need to re-open in order to be able to write from the start. */
1351 da_file
= fopen (ptr
->filename
, "r+b");
1352 /* Some old systems might not allow the 'b' mode modifier.
1353 Therefore, try to open without it. This can lead to a race
1354 condition so that when you delete and re-create the file, the
1355 file might be opened in text mode, but then, you shouldn't
1356 delete the file in the first place. */
1358 da_file
= fopen (ptr
->filename
, "r+");
1361 fprintf (stderr
, "arc profiling: Can't open output file %s.\n",
1366 /* After a fork, another process might try to read and/or write
1367 the same file simultaneously. So if we can, lock the file to
1368 avoid race conditions. */
1369 #if defined (TARGET_HAS_F_SETLKW)
1371 struct flock s_flock
;
1373 s_flock
.l_type
= F_WRLCK
;
1374 s_flock
.l_whence
= SEEK_SET
;
1375 s_flock
.l_start
= 0;
1377 s_flock
.l_pid
= getpid ();
1379 while (fcntl (fileno (da_file
), F_SETLKW
, &s_flock
)
1384 /* If the file is not empty, and the number of counts in it is the
1385 same, then merge them in. */
1386 firstchar
= fgetc (da_file
);
1387 if (firstchar
== EOF
)
1389 if (ferror (da_file
))
1391 fprintf (stderr
, "arc profiling: Can't read output file ");
1392 perror (ptr
->filename
);
1399 if (ungetc (firstchar
, da_file
) == EOF
)
1401 if (__read_long (&n_counts
, da_file
, 8) != 0)
1403 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1408 if (n_counts
== ptr
->ncounts
)
1412 for (i
= 0; i
< n_counts
; i
++)
1416 if (__read_long (&v
, da_file
, 8) != 0)
1418 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1422 ptr
->counts
[i
] += v
;
1430 /* ??? Should first write a header to the file. Preferably, a 4 byte
1431 magic number, 4 bytes containing the time the program was
1432 compiled, 4 bytes containing the last modification time of the
1433 source file, and 4 bytes indicating the compiler options used.
1435 That way we can easily verify that the proper source/executable/
1436 data file combination is being used from gcov. */
1438 if (__write_long (ptr
->ncounts
, da_file
, 8) != 0)
1441 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1447 long *count_ptr
= ptr
->counts
;
1449 for (j
= ptr
->ncounts
; j
> 0; j
--)
1451 if (__write_long (*count_ptr
, da_file
, 8) != 0)
1459 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1463 if (fclose (da_file
) == EOF
)
1464 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1471 /* Must be basic block profiling. Emit a human readable output file. */
1473 file
= fopen ("bb.out", "a");
1482 /* This is somewhat type incorrect, but it avoids worrying about
1483 exactly where time.h is included from. It should be ok unless
1484 a void * differs from other pointer formats, or if sizeof (long)
1485 is < sizeof (time_t). It would be nice if we could assume the
1486 use of rationale standards here. */
1488 time ((void *) &time_value
);
1489 fprintf (file
, "Basic block profiling finished on %s\n", ctime ((void *) &time_value
));
1491 /* We check the length field explicitly in order to allow compatibility
1492 with older GCC's which did not provide it. */
1494 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1497 int func_p
= (ptr
->nwords
>= (long) sizeof (struct bb
)
1498 && ptr
->nwords
<= 1000
1500 int line_p
= (func_p
&& ptr
->line_nums
);
1501 int file_p
= (func_p
&& ptr
->filenames
);
1502 int addr_p
= (ptr
->addresses
!= 0);
1503 long ncounts
= ptr
->ncounts
;
1509 int blk_len
= num_digits (ncounts
, 10);
1514 fprintf (file
, "File %s, %ld basic blocks \n\n",
1515 ptr
->filename
, ncounts
);
1517 /* Get max values for each field. */
1518 for (i
= 0; i
< ncounts
; i
++)
1523 if (cnt_max
< ptr
->counts
[i
])
1524 cnt_max
= ptr
->counts
[i
];
1526 if (addr_p
&& (unsigned long) addr_max
< ptr
->addresses
[i
])
1527 addr_max
= ptr
->addresses
[i
];
1529 if (line_p
&& line_max
< ptr
->line_nums
[i
])
1530 line_max
= ptr
->line_nums
[i
];
1534 p
= (ptr
->functions
[i
]) ? (ptr
->functions
[i
]) : "<none>";
1542 p
= (ptr
->filenames
[i
]) ? (ptr
->filenames
[i
]) : "<none>";
1549 addr_len
= num_digits (addr_max
, 16);
1550 cnt_len
= num_digits (cnt_max
, 10);
1551 line_len
= num_digits (line_max
, 10);
1553 /* Now print out the basic block information. */
1554 for (i
= 0; i
< ncounts
; i
++)
1557 " Block #%*d: executed %*ld time(s)",
1559 cnt_len
, ptr
->counts
[i
]);
1562 fprintf (file
, " address= 0x%.*lx", addr_len
,
1566 fprintf (file
, " function= %-*s", func_len
,
1567 (ptr
->functions
[i
]) ? ptr
->functions
[i
] : "<none>");
1570 fprintf (file
, " line= %*ld", line_len
, ptr
->line_nums
[i
]);
1573 fprintf (file
, " file= %s",
1574 (ptr
->filenames
[i
]) ? ptr
->filenames
[i
] : "<none>");
1576 fprintf (file
, "\n");
1579 fprintf (file
, "\n");
1583 fprintf (file
, "\n\n");
1589 __bb_init_func (struct bb
*blocks
)
1591 /* User is supposed to check whether the first word is non-0,
1592 but just in case.... */
1594 if (blocks
->zero_word
)
1597 /* Initialize destructor. */
1599 atexit (__bb_exit_func
);
1601 /* Set up linked list. */
1602 blocks
->zero_word
= 1;
1603 blocks
->next
= bb_head
;
1607 /* Called before fork or exec - write out profile information gathered so
1608 far and reset it to zero. This avoids duplication or loss of the
1609 profile information gathered so far. */
1611 __bb_fork_func (void)
1616 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1619 for (i
= ptr
->ncounts
- 1; i
>= 0; i
--)
1624 #ifndef MACHINE_STATE_SAVE
1625 #define MACHINE_STATE_SAVE(ID)
1627 #ifndef MACHINE_STATE_RESTORE
1628 #define MACHINE_STATE_RESTORE(ID)
1631 /* Number of buckets in hashtable of basic block addresses. */
1633 #define BB_BUCKETS 311
1635 /* Maximum length of string in file bb.in. */
1637 #define BBINBUFSIZE 500
1641 struct bb_edge
*next
;
1642 unsigned long src_addr
;
1643 unsigned long dst_addr
;
1644 unsigned long count
;
1649 TRACE_KEEP
= 0, TRACE_ON
= 1, TRACE_OFF
= 2
1654 struct bb_func
*next
;
1657 enum bb_func_mode mode
;
1660 /* This is the connection to the outside world.
1661 The BLOCK_PROFILER macro must set __bb.blocks
1662 and __bb.blockno. */
1665 unsigned long blockno
;
1669 /* Vars to store addrs of source and destination basic blocks
1672 static unsigned long bb_src
= 0;
1673 static unsigned long bb_dst
= 0;
1675 static FILE *bb_tracefile
= (FILE *) 0;
1676 static struct bb_edge
**bb_hashbuckets
= (struct bb_edge
**) 0;
1677 static struct bb_func
*bb_func_head
= (struct bb_func
*) 0;
1678 static unsigned long bb_callcount
= 0;
1679 static int bb_mode
= 0;
1681 static unsigned long *bb_stack
= (unsigned long *) 0;
1682 static size_t bb_stacksize
= 0;
1684 static int reported
= 0;
1687 Always : Print execution frequencies of basic blocks
1689 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1690 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1691 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1692 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1697 /*#include <sys/types.h>*/
1698 #include <sys/stat.h>
1699 /*#include <malloc.h>*/
1701 /* Commands executed by gopen. */
1703 #define GOPENDECOMPRESS "gzip -cd "
1704 #define GOPENCOMPRESS "gzip -c >"
1706 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1707 If it does not compile, simply replace gopen by fopen and delete
1708 '.gz' from any first parameter to gopen. */
1711 gopen (char *fn
, char *mode
)
1719 if (mode
[0] != 'r' && mode
[0] != 'w')
1722 p
= fn
+ strlen (fn
)-1;
1723 use_gzip
= ((p
[-1] == '.' && (p
[0] == 'Z' || p
[0] == 'z'))
1724 || (p
[-2] == '.' && p
[-1] == 'g' && p
[0] == 'z'));
1731 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1732 + sizeof (GOPENDECOMPRESS
));
1733 strcpy (s
, GOPENDECOMPRESS
);
1734 strcpy (s
+ (sizeof (GOPENDECOMPRESS
)-1), fn
);
1735 f
= popen (s
, mode
);
1743 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1744 + sizeof (GOPENCOMPRESS
));
1745 strcpy (s
, GOPENCOMPRESS
);
1746 strcpy (s
+ (sizeof (GOPENCOMPRESS
)-1), fn
);
1747 if (!(f
= popen (s
, mode
)))
1748 f
= fopen (s
, mode
);
1755 return fopen (fn
, mode
);
1765 if (!fstat (fileno (f
), &buf
) && S_ISFIFO (buf
.st_mode
))
1773 #endif /* HAVE_POPEN */
1775 /* Called once per program. */
1778 __bb_exit_trace_func (void)
1780 FILE *file
= fopen ("bb.out", "a");
1793 gclose (bb_tracefile
);
1795 fclose (bb_tracefile
);
1796 #endif /* HAVE_POPEN */
1799 /* Check functions in `bb.in'. */
1804 const struct bb_func
*p
;
1805 int printed_something
= 0;
1809 /* This is somewhat type incorrect. */
1810 time ((void *) &time_value
);
1812 for (p
= bb_func_head
; p
!= (struct bb_func
*) 0; p
= p
->next
)
1814 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1816 if (!ptr
->filename
|| (p
->filename
!= (char *) 0 && strcmp (p
->filename
, ptr
->filename
)))
1818 for (blk
= 0; blk
< ptr
->ncounts
; blk
++)
1820 if (!strcmp (p
->funcname
, ptr
->functions
[blk
]))
1825 if (!printed_something
)
1827 fprintf (file
, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value
));
1828 printed_something
= 1;
1831 fprintf (file
, "\tFunction %s", p
->funcname
);
1833 fprintf (file
, " of file %s", p
->filename
);
1834 fprintf (file
, "\n" );
1839 if (printed_something
)
1840 fprintf (file
, "\n");
1846 if (!bb_hashbuckets
)
1850 fprintf (stderr
, "Profiler: out of memory\n");
1860 unsigned long addr_max
= 0;
1861 unsigned long cnt_max
= 0;
1865 /* This is somewhat type incorrect, but it avoids worrying about
1866 exactly where time.h is included from. It should be ok unless
1867 a void * differs from other pointer formats, or if sizeof (long)
1868 is < sizeof (time_t). It would be nice if we could assume the
1869 use of rationale standards here. */
1871 time ((void *) &time_value
);
1872 fprintf (file
, "Basic block jump tracing");
1874 switch (bb_mode
& 12)
1877 fprintf (file
, " (with call)");
1881 /* Print nothing. */
1885 fprintf (file
, " (with call & ret)");
1889 fprintf (file
, " (with ret)");
1893 fprintf (file
, " finished on %s\n", ctime ((void *) &time_value
));
1895 for (i
= 0; i
< BB_BUCKETS
; i
++)
1897 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
1898 for ( ; bucket
; bucket
= bucket
->next
)
1900 if (addr_max
< bucket
->src_addr
)
1901 addr_max
= bucket
->src_addr
;
1902 if (addr_max
< bucket
->dst_addr
)
1903 addr_max
= bucket
->dst_addr
;
1904 if (cnt_max
< bucket
->count
)
1905 cnt_max
= bucket
->count
;
1908 addr_len
= num_digits (addr_max
, 16);
1909 cnt_len
= num_digits (cnt_max
, 10);
1911 for ( i
= 0; i
< BB_BUCKETS
; i
++)
1913 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
1914 for ( ; bucket
; bucket
= bucket
->next
)
1917 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
1918 addr_len
, bucket
->src_addr
,
1919 addr_len
, bucket
->dst_addr
,
1920 cnt_len
, bucket
->count
);
1924 fprintf (file
, "\n");
1932 /* Free allocated memory. */
1937 struct bb_func
*old
= f
;
1940 if (old
->funcname
) free (old
->funcname
);
1941 if (old
->filename
) free (old
->filename
);
1952 for (i
= 0; i
< BB_BUCKETS
; i
++)
1954 struct bb_edge
*old
, *bucket
= bb_hashbuckets
[i
];
1959 bucket
= bucket
->next
;
1963 free (bb_hashbuckets
);
1966 for (b
= bb_head
; b
; b
= b
->next
)
1967 if (b
->flags
) free (b
->flags
);
1970 /* Called once per program. */
1973 __bb_init_prg (void)
1976 char buf
[BBINBUFSIZE
];
1979 enum bb_func_mode m
;
1982 /* Initialize destructor. */
1983 atexit (__bb_exit_func
);
1985 if (!(file
= fopen ("bb.in", "r")))
1988 while(fgets (buf
, BBINBUFSIZE
, file
) != 0)
2004 if (!strcmp (p
, "__bb_trace__"))
2006 else if (!strcmp (p
, "__bb_jumps__"))
2008 else if (!strcmp (p
, "__bb_hidecall__"))
2010 else if (!strcmp (p
, "__bb_showret__"))
2014 struct bb_func
*f
= (struct bb_func
*) malloc (sizeof (struct bb_func
));
2018 f
->next
= bb_func_head
;
2019 if ((pos
= strchr (p
, ':')))
2021 if (!(f
->funcname
= (char *) malloc (strlen (pos
+1)+1)))
2023 strcpy (f
->funcname
, pos
+1);
2025 if ((f
->filename
= (char *) malloc (l
+1)))
2027 strncpy (f
->filename
, p
, l
);
2028 f
->filename
[l
] = '\0';
2031 f
->filename
= (char *) 0;
2035 if (!(f
->funcname
= (char *) malloc (strlen (p
)+1)))
2037 strcpy (f
->funcname
, p
);
2038 f
->filename
= (char *) 0;
2050 bb_tracefile
= gopen ("bbtrace.gz", "w");
2055 bb_tracefile
= fopen ("bbtrace", "w");
2057 #endif /* HAVE_POPEN */
2061 bb_hashbuckets
= (struct bb_edge
**)
2062 malloc (BB_BUCKETS
* sizeof (struct bb_edge
*));
2064 /* Use a loop here rather than calling bzero to avoid having to
2065 conditionalize its existence. */
2066 for (i
= 0; i
< BB_BUCKETS
; i
++)
2067 bb_hashbuckets
[i
] = 0;
2073 bb_stack
= (unsigned long *) malloc (bb_stacksize
* sizeof (*bb_stack
));
2076 /* Initialize destructor. */
2077 atexit (__bb_exit_trace_func
);
2080 /* Called upon entering a basic block. */
2083 __bb_trace_func (void)
2085 struct bb_edge
*bucket
;
2087 MACHINE_STATE_SAVE("1")
2089 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2092 bb_dst
= __bb
.blocks
->addresses
[__bb
.blockno
];
2093 __bb
.blocks
->counts
[__bb
.blockno
]++;
2097 fwrite (&bb_dst
, sizeof (unsigned long), 1, bb_tracefile
);
2102 struct bb_edge
**startbucket
, **oldnext
;
2104 oldnext
= startbucket
2105 = & bb_hashbuckets
[ (((int) bb_src
*8) ^ (int) bb_dst
) % BB_BUCKETS
];
2106 bucket
= *startbucket
;
2108 for (bucket
= *startbucket
; bucket
;
2109 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2111 if (bucket
->src_addr
== bb_src
2112 && bucket
->dst_addr
== bb_dst
)
2115 *oldnext
= bucket
->next
;
2116 bucket
->next
= *startbucket
;
2117 *startbucket
= bucket
;
2122 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2128 fprintf (stderr
, "Profiler: out of memory\n");
2135 bucket
->src_addr
= bb_src
;
2136 bucket
->dst_addr
= bb_dst
;
2137 bucket
->next
= *startbucket
;
2138 *startbucket
= bucket
;
2149 MACHINE_STATE_RESTORE("1")
2153 /* Called when returning from a function and `__bb_showret__' is set. */
2156 __bb_trace_func_ret (void)
2158 struct bb_edge
*bucket
;
2160 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2165 struct bb_edge
**startbucket
, **oldnext
;
2167 oldnext
= startbucket
2168 = & bb_hashbuckets
[ (((int) bb_dst
* 8) ^ (int) bb_src
) % BB_BUCKETS
];
2169 bucket
= *startbucket
;
2171 for (bucket
= *startbucket
; bucket
;
2172 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2174 if (bucket
->src_addr
== bb_dst
2175 && bucket
->dst_addr
== bb_src
)
2178 *oldnext
= bucket
->next
;
2179 bucket
->next
= *startbucket
;
2180 *startbucket
= bucket
;
2185 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2191 fprintf (stderr
, "Profiler: out of memory\n");
2198 bucket
->src_addr
= bb_dst
;
2199 bucket
->dst_addr
= bb_src
;
2200 bucket
->next
= *startbucket
;
2201 *startbucket
= bucket
;
2214 /* Called upon entering the first function of a file. */
2217 __bb_init_file (struct bb
*blocks
)
2220 const struct bb_func
*p
;
2221 long blk
, ncounts
= blocks
->ncounts
;
2222 const char **functions
= blocks
->functions
;
2224 /* Set up linked list. */
2225 blocks
->zero_word
= 1;
2226 blocks
->next
= bb_head
;
2231 || !(blocks
->flags
= (char *) malloc (sizeof (char) * blocks
->ncounts
)))
2234 for (blk
= 0; blk
< ncounts
; blk
++)
2235 blocks
->flags
[blk
] = 0;
2237 for (blk
= 0; blk
< ncounts
; blk
++)
2239 for (p
= bb_func_head
; p
; p
= p
->next
)
2241 if (!strcmp (p
->funcname
, functions
[blk
])
2242 && (!p
->filename
|| !strcmp (p
->filename
, blocks
->filename
)))
2244 blocks
->flags
[blk
] |= p
->mode
;
2251 /* Called when exiting from a function. */
2254 __bb_trace_ret (void)
2257 MACHINE_STATE_SAVE("2")
2261 if ((bb_mode
& 12) && bb_stacksize
> bb_callcount
)
2263 bb_src
= bb_stack
[bb_callcount
];
2265 __bb_trace_func_ret ();
2271 MACHINE_STATE_RESTORE("2")
2275 /* Called when entering a function. */
2278 __bb_init_trace_func (struct bb
*blocks
, unsigned long blockno
)
2280 static int trace_init
= 0;
2282 MACHINE_STATE_SAVE("3")
2284 if (!blocks
->zero_word
)
2291 __bb_init_file (blocks
);
2301 if (bb_callcount
>= bb_stacksize
)
2303 size_t newsize
= bb_callcount
+ 100;
2305 bb_stack
= (unsigned long *) realloc (bb_stack
, newsize
);
2310 fprintf (stderr
, "Profiler: out of memory\n");
2314 goto stack_overflow
;
2316 bb_stacksize
= newsize
;
2318 bb_stack
[bb_callcount
] = bb_src
;
2329 else if (blocks
->flags
&& (blocks
->flags
[blockno
] & TRACE_ON
))
2335 bb_stack
[bb_callcount
] = bb_src
;
2338 MACHINE_STATE_RESTORE("3")
2341 #endif /* not inhibit_libc */
2342 #endif /* not BLOCK_PROFILER_CODE */
2345 #ifdef L_clear_cache
2346 /* Clear part of an instruction cache. */
2348 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2351 __clear_cache (char *beg
__attribute__((__unused__
)),
2352 char *end
__attribute__((__unused__
)))
2354 #ifdef CLEAR_INSN_CACHE
2355 CLEAR_INSN_CACHE (beg
, end
);
2357 #ifdef INSN_CACHE_SIZE
2358 static char array
[INSN_CACHE_SIZE
+ INSN_CACHE_PLANE_SIZE
+ INSN_CACHE_LINE_WIDTH
];
2359 static int initialized
;
2363 typedef (*function_ptr
) (void);
2365 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2366 /* It's cheaper to clear the whole cache.
2367 Put in a series of jump instructions so that calling the beginning
2368 of the cache will clear the whole thing. */
2372 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2373 & -INSN_CACHE_LINE_WIDTH
);
2374 int end_ptr
= ptr
+ INSN_CACHE_SIZE
;
2376 while (ptr
< end_ptr
)
2378 *(INSTRUCTION_TYPE
*)ptr
2379 = JUMP_AHEAD_INSTRUCTION
+ INSN_CACHE_LINE_WIDTH
;
2380 ptr
+= INSN_CACHE_LINE_WIDTH
;
2382 *(INSTRUCTION_TYPE
*) (ptr
- INSN_CACHE_LINE_WIDTH
) = RETURN_INSTRUCTION
;
2387 /* Call the beginning of the sequence. */
2388 (((function_ptr
) (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2389 & -INSN_CACHE_LINE_WIDTH
))
2392 #else /* Cache is large. */
2396 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2397 & -INSN_CACHE_LINE_WIDTH
);
2399 while (ptr
< (int) array
+ sizeof array
)
2401 *(INSTRUCTION_TYPE
*)ptr
= RETURN_INSTRUCTION
;
2402 ptr
+= INSN_CACHE_LINE_WIDTH
;
2408 /* Find the location in array that occupies the same cache line as BEG. */
2410 offset
= ((int) beg
& -INSN_CACHE_LINE_WIDTH
) & (INSN_CACHE_PLANE_SIZE
- 1);
2411 start_addr
= (((int) (array
+ INSN_CACHE_PLANE_SIZE
- 1)
2412 & -INSN_CACHE_PLANE_SIZE
)
2415 /* Compute the cache alignment of the place to stop clearing. */
2416 #if 0 /* This is not needed for gcc's purposes. */
2417 /* If the block to clear is bigger than a cache plane,
2418 we clear the entire cache, and OFFSET is already correct. */
2419 if (end
< beg
+ INSN_CACHE_PLANE_SIZE
)
2421 offset
= (((int) (end
+ INSN_CACHE_LINE_WIDTH
- 1)
2422 & -INSN_CACHE_LINE_WIDTH
)
2423 & (INSN_CACHE_PLANE_SIZE
- 1));
2425 #if INSN_CACHE_DEPTH > 1
2426 end_addr
= (start_addr
& -INSN_CACHE_PLANE_SIZE
) + offset
;
2427 if (end_addr
<= start_addr
)
2428 end_addr
+= INSN_CACHE_PLANE_SIZE
;
2430 for (plane
= 0; plane
< INSN_CACHE_DEPTH
; plane
++)
2432 int addr
= start_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2433 int stop
= end_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2435 while (addr
!= stop
)
2437 /* Call the return instruction at ADDR. */
2438 ((function_ptr
) addr
) ();
2440 addr
+= INSN_CACHE_LINE_WIDTH
;
2443 #else /* just one plane */
2446 /* Call the return instruction at START_ADDR. */
2447 ((function_ptr
) start_addr
) ();
2449 start_addr
+= INSN_CACHE_LINE_WIDTH
;
2451 while ((start_addr
% INSN_CACHE_SIZE
) != offset
);
2452 #endif /* just one plane */
2453 #endif /* Cache is large */
2454 #endif /* Cache exists */
2455 #endif /* CLEAR_INSN_CACHE */
2458 #endif /* L_clear_cache */
2462 /* Jump to a trampoline, loading the static chain address. */
2464 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2477 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2481 mprotect (char *addr
, int len
, int prot
)
2498 if (VirtualProtect (addr
, len
, np
, &op
))
2504 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2506 #ifdef TRANSFER_FROM_TRAMPOLINE
2507 TRANSFER_FROM_TRAMPOLINE
2510 #if defined (NeXT) && defined (__MACH__)
2512 /* Make stack executable so we can call trampolines on stack.
2513 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2517 #include <mach/mach.h>
2521 __enable_execute_stack (char *addr
)
2524 char *eaddr
= addr
+ TRAMPOLINE_SIZE
;
2525 vm_address_t a
= (vm_address_t
) addr
;
2527 /* turn on execute access on stack */
2528 r
= vm_protect (task_self (), a
, TRAMPOLINE_SIZE
, FALSE
, VM_PROT_ALL
);
2529 if (r
!= KERN_SUCCESS
)
2531 mach_error("vm_protect VM_PROT_ALL", r
);
2535 /* We inline the i-cache invalidation for speed */
2537 #ifdef CLEAR_INSN_CACHE
2538 CLEAR_INSN_CACHE (addr
, eaddr
);
2540 __clear_cache ((int) addr
, (int) eaddr
);
2544 #endif /* defined (NeXT) && defined (__MACH__) */
2548 /* Make stack executable so we can call trampolines on stack.
2549 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2551 #include <sys/mman.h>
2552 #include <sys/vmparam.h>
2553 #include <machine/machparam.h>
2556 __enable_execute_stack (void)
2559 static unsigned lowest
= USRSTACK
;
2560 unsigned current
= (unsigned) &fp
& -NBPG
;
2562 if (lowest
> current
)
2564 unsigned len
= lowest
- current
;
2565 mremap (current
, &len
, PROT_READ
| PROT_WRITE
| PROT_EXEC
, MAP_PRIVATE
);
2569 /* Clear instruction cache in case an old trampoline is in it. */
2572 #endif /* __convex__ */
2576 /* Modified from the convex -code above. */
2578 #include <sys/param.h>
2580 #include <sys/m88kbcs.h>
2583 __enable_execute_stack (void)
2586 static unsigned long lowest
= USRSTACK
;
2587 unsigned long current
= (unsigned long) &save_errno
& -NBPC
;
2589 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2590 address is seen as 'negative'. That is the case with the stack. */
2593 if (lowest
> current
)
2595 unsigned len
=lowest
-current
;
2596 memctl(current
,len
,MCT_TEXT
);
2600 memctl(current
,NBPC
,MCT_TEXT
);
2604 #endif /* __sysV88__ */
2608 #include <sys/signal.h>
2611 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2612 so define it here, because we need it in __clear_insn_cache below */
2613 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2614 hence we enable this stuff only if MCT_TEXT is #define'd. */
2629 /* Clear instruction cache so we can call trampolines on stack.
2630 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2633 __clear_insn_cache (void)
2638 /* Preserve errno, because users would be surprised to have
2639 errno changing without explicitly calling any system-call. */
2642 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2643 No need to use an address derived from _start or %sp, as 0 works also. */
2644 memctl(0, 4096, MCT_TEXT
);
2649 #endif /* __sysV68__ */
2653 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2655 #include <sys/mman.h>
2656 #include <sys/types.h>
2657 #include <sys/param.h>
2658 #include <sys/vmmac.h>
2660 /* Modified from the convex -code above.
2661 mremap promises to clear the i-cache. */
2664 __enable_execute_stack (void)
2667 if (mprotect (((unsigned int)&fp
/PAGSIZ
)*PAGSIZ
, PAGSIZ
,
2668 PROT_READ
|PROT_WRITE
|PROT_EXEC
))
2670 perror ("mprotect in __enable_execute_stack");
2675 #endif /* __pyr__ */
2677 #if defined (sony_news) && defined (SYSTYPE_BSD)
2680 #include <sys/types.h>
2681 #include <sys/param.h>
2682 #include <syscall.h>
2683 #include <machine/sysnews.h>
2685 /* cacheflush function for NEWS-OS 4.2.
2686 This function is called from trampoline-initialize code
2687 defined in config/mips/mips.h. */
2690 cacheflush (char *beg
, int size
, int flag
)
2692 if (syscall (SYS_sysnews
, NEWS_CACHEFLUSH
, beg
, size
, FLUSH_BCACHE
))
2694 perror ("cache_flush");
2700 #endif /* sony_news */
2701 #endif /* L_trampoline */
2706 #include "gbl-ctors.h"
2707 /* Some systems use __main in a way incompatible with its use in gcc, in these
2708 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2709 give the same symbol without quotes for an alternative entry point. You
2710 must define both, or neither. */
2712 #define NAME__MAIN "__main"
2713 #define SYMBOL__MAIN __main
2716 #ifdef INIT_SECTION_ASM_OP
2717 #undef HAS_INIT_SECTION
2718 #define HAS_INIT_SECTION
2721 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2723 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2724 code to run constructors. In that case, we need to handle EH here, too. */
2726 #ifdef EH_FRAME_SECTION
2728 extern unsigned char __EH_FRAME_BEGIN__
[];
2731 /* Run all the global destructors on exit from the program. */
2734 __do_global_dtors (void)
2736 #ifdef DO_GLOBAL_DTORS_BODY
2737 DO_GLOBAL_DTORS_BODY
;
2739 static func_ptr
*p
= __DTOR_LIST__
+ 1;
2746 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
2748 static int completed
= 0;
2752 __deregister_frame_info (__EH_FRAME_BEGIN__
);
2759 #ifndef HAS_INIT_SECTION
2760 /* Run all the global constructors on entry to the program. */
2763 __do_global_ctors (void)
2765 #ifdef EH_FRAME_SECTION
2767 static struct object object
;
2768 __register_frame_info (__EH_FRAME_BEGIN__
, &object
);
2771 DO_GLOBAL_CTORS_BODY
;
2772 atexit (__do_global_dtors
);
2774 #endif /* no HAS_INIT_SECTION */
2776 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2777 /* Subroutine called automatically by `main'.
2778 Compiling a global function named `main'
2779 produces an automatic call to this function at the beginning.
2781 For many systems, this routine calls __do_global_ctors.
2782 For systems which support a .init section we use the .init section
2783 to run __do_global_ctors, so we need not do anything here. */
2788 /* Support recursive calls to `main': run initializers just once. */
2789 static int initialized
;
2793 __do_global_ctors ();
2796 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2798 #endif /* L__main */
2799 #endif /* __CYGWIN__ */
2803 #include "gbl-ctors.h"
2805 /* Provide default definitions for the lists of constructors and
2806 destructors, so that we don't get linker errors. These symbols are
2807 intentionally bss symbols, so that gld and/or collect will provide
2808 the right values. */
2810 /* We declare the lists here with two elements each,
2811 so that they are valid empty lists if no other definition is loaded.
2813 If we are using the old "set" extensions to have the gnu linker
2814 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
2815 must be in the bss/common section.
2817 Long term no port should use those extensions. But many still do. */
2818 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2819 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
2820 func_ptr __CTOR_LIST__
[2] = {0, 0};
2821 func_ptr __DTOR_LIST__
[2] = {0, 0};
2823 func_ptr __CTOR_LIST__
[2];
2824 func_ptr __DTOR_LIST__
[2];
2826 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2827 #endif /* L_ctors */
2831 #include "gbl-ctors.h"
2839 static func_ptr
*atexit_chain
= 0;
2840 static long atexit_chain_length
= 0;
2841 static volatile long last_atexit_chain_slot
= -1;
2844 atexit (func_ptr func
)
2846 if (++last_atexit_chain_slot
== atexit_chain_length
)
2848 atexit_chain_length
+= 32;
2850 atexit_chain
= (func_ptr
*) realloc (atexit_chain
, atexit_chain_length
2851 * sizeof (func_ptr
));
2853 atexit_chain
= (func_ptr
*) malloc (atexit_chain_length
2854 * sizeof (func_ptr
));
2857 atexit_chain_length
= 0;
2858 last_atexit_chain_slot
= -1;
2863 atexit_chain
[last_atexit_chain_slot
] = func
;
2867 extern void _cleanup (void);
2868 extern void _exit (int) __attribute__ ((__noreturn__
));
2875 for ( ; last_atexit_chain_slot
-- >= 0; )
2877 (*atexit_chain
[last_atexit_chain_slot
+ 1]) ();
2878 atexit_chain
[last_atexit_chain_slot
+ 1] = 0;
2880 free (atexit_chain
);
2893 /* Simple; we just need a wrapper for ON_EXIT. */
2895 atexit (func_ptr func
)
2897 return ON_EXIT (func
);
2900 #endif /* ON_EXIT */
2901 #endif /* NEED_ATEXIT */
2909 /* Shared exception handling support routines. */
2912 __default_terminate (void)
2917 static __terminate_func_ptr __terminate_func
=
2918 __default_terminate
;
2920 void __attribute__((__noreturn__
))
2923 (*__terminate_func
)();
2926 __terminate_func_ptr
2927 __terminate_set_func (__terminate_func_ptr newfunc
)
2929 __terminate_func_ptr oldfunc
= __terminate_func
;
2931 __terminate_func
= newfunc
;
2936 __throw_type_match (void *catch_type
, void *throw_type
, void *obj
)
2939 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
2940 catch_type
, throw_type
);
2942 if (strcmp ((const char *)catch_type
, (const char *)throw_type
) == 0)
2949 /* Include definitions of EH context and table layout */
2951 #include "eh-common.h"
2952 #ifndef inhibit_libc
2956 /* Allocate and return a new EH context structure. */
2960 new_eh_context (void)
2962 struct eh_full_context
{
2963 struct eh_context c
;
2965 } *ehfc
= (struct eh_full_context
*) malloc (sizeof *ehfc
);
2970 memset (ehfc
, 0, sizeof *ehfc
);
2972 ehfc
->c
.dynamic_handler_chain
= (void **) ehfc
->top_elt
;
2974 /* This should optimize out entirely. This should always be true,
2975 but just in case it ever isn't, don't allow bogus code to be
2978 if ((void*)(&ehfc
->c
) != (void*)ehfc
)
2984 static __gthread_key_t eh_context_key
;
2986 /* Destructor for struct eh_context. */
2988 eh_context_free (void *ptr
)
2990 __gthread_key_dtor (eh_context_key
, ptr
);
2996 /* Pointer to function to return EH context. */
2998 static struct eh_context
*eh_context_initialize (void);
2999 static struct eh_context
*eh_context_static (void);
3001 static struct eh_context
*eh_context_specific (void);
3004 static struct eh_context
*(*get_eh_context
) (void) = &eh_context_initialize
;
3006 /* Routine to get EH context.
3007 This one will simply call the function pointer. */
3010 __get_eh_context (void)
3012 return (void *) (*get_eh_context
) ();
3015 /* Get and set the language specific info pointer. */
3018 __get_eh_info (void)
3020 struct eh_context
*eh
= (*get_eh_context
) ();
3024 #ifdef DWARF2_UNWIND_INFO
3025 static int dwarf_reg_size_table_initialized
= 0;
3026 static char dwarf_reg_size_table
[DWARF_FRAME_REGISTERS
];
3029 init_reg_size_table (void)
3031 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table
);
3032 dwarf_reg_size_table_initialized
= 1;
3038 eh_threads_initialize (void)
3040 /* Try to create the key. If it fails, revert to static method,
3041 otherwise start using thread specific EH contexts. */
3042 if (__gthread_key_create (&eh_context_key
, &eh_context_free
) == 0)
3043 get_eh_context
= &eh_context_specific
;
3045 get_eh_context
= &eh_context_static
;
3047 #endif /* no __GTHREADS */
3049 /* Initialize EH context.
3050 This will be called only once, since we change GET_EH_CONTEXT
3051 pointer to another routine. */
3053 static struct eh_context
*
3054 eh_context_initialize (void)
3058 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
3059 /* Make sure that get_eh_context does not point to us anymore.
3060 Some systems have dummy thread routines in their libc that
3061 return a success (Solaris 2.6 for example). */
3062 if (__gthread_once (&once
, eh_threads_initialize
) != 0
3063 || get_eh_context
== &eh_context_initialize
)
3065 /* Use static version of EH context. */
3066 get_eh_context
= &eh_context_static
;
3068 #ifdef DWARF2_UNWIND_INFO
3070 static __gthread_once_t once_regsizes
= __GTHREAD_ONCE_INIT
;
3071 if (__gthread_once (&once_regsizes
, init_reg_size_table
) != 0
3072 || ! dwarf_reg_size_table_initialized
)
3073 init_reg_size_table ();
3077 #else /* no __GTHREADS */
3079 /* Use static version of EH context. */
3080 get_eh_context
= &eh_context_static
;
3082 #ifdef DWARF2_UNWIND_INFO
3083 init_reg_size_table ();
3086 #endif /* no __GTHREADS */
3088 return (*get_eh_context
) ();
3091 /* Return a static EH context. */
3093 static struct eh_context
*
3094 eh_context_static (void)
3096 static struct eh_context eh
;
3097 static int initialized
;
3098 static void *top_elt
[2];
3103 memset (&eh
, 0, sizeof eh
);
3104 eh
.dynamic_handler_chain
= top_elt
;
3110 /* Return a thread specific EH context. */
3112 static struct eh_context
*
3113 eh_context_specific (void)
3115 struct eh_context
*eh
;
3116 eh
= (struct eh_context
*) __gthread_getspecific (eh_context_key
);
3119 eh
= new_eh_context ();
3120 if (__gthread_setspecific (eh_context_key
, (void *) eh
) != 0)
3126 #endif /* __GTHREADS */
3128 /* Support routines for alloc/free during exception handling */
3130 /* __eh_alloc and __eh_free attempt allocation using malloc, but fall back to
3131 the small arena in the eh_context. This is needed because throwing an
3132 out-of-memory exception would fail otherwise. The emergency space is
3133 allocated in blocks of size EH_ALLOC_ALIGN, the
3134 minimum allocation being two blocks. A bitmask indicates which blocks
3135 have been allocated. To indicate the size of an allocation, the bit for
3136 the final block is not set. Hence each allocation is a run of 1s followed
3139 __eh_alloc (size_t size
)
3148 struct eh_context
*eh
= __get_eh_context ();
3149 unsigned blocks
= (size
+ EH_ALLOC_ALIGN
- 1) / EH_ALLOC_ALIGN
;
3150 unsigned real_mask
= eh
->alloc_mask
| (eh
->alloc_mask
<< 1);
3154 if (blocks
> EH_ALLOC_SIZE
/ EH_ALLOC_ALIGN
)
3156 blocks
+= blocks
== 1;
3157 our_mask
= (1 << blocks
) - 1;
3159 for (ix
= EH_ALLOC_SIZE
/ EH_ALLOC_ALIGN
- blocks
; ix
; ix
--)
3160 if (! ((real_mask
>> ix
) & our_mask
))
3162 /* found some space */
3163 p
= &eh
->alloc_buffer
[ix
* EH_ALLOC_ALIGN
];
3164 eh
->alloc_mask
|= (our_mask
>> 1) << ix
;
3172 /* Free the memory for an cp_eh_info and associated exception, given
3173 a pointer to the cp_eh_info. */
3177 struct eh_context
*eh
= __get_eh_context ();
3179 ptrdiff_t diff
= (char *)p
- &eh
->alloc_buffer
[0];
3180 if (diff
>= 0 && diff
< EH_ALLOC_SIZE
)
3182 unsigned mask
= eh
->alloc_mask
;
3183 unsigned bit
= 1 << (diff
/ EH_ALLOC_ALIGN
);
3191 eh
->alloc_mask
= mask
;
3197 /* Support routines for setjmp/longjmp exception handling. */
3199 /* Calls to __sjthrow are generated by the compiler when an exception
3200 is raised when using the setjmp/longjmp exception handling codegen
3203 #ifdef DONT_USE_BUILTIN_SETJMP
3204 extern void longjmp (void *, int);
3207 /* Routine to get the head of the current thread's dynamic handler chain
3208 use for exception handling. */
3211 __get_dynamic_handler_chain (void)
3213 struct eh_context
*eh
= (*get_eh_context
) ();
3214 return &eh
->dynamic_handler_chain
;
3217 /* This is used to throw an exception when the setjmp/longjmp codegen
3218 method is used for exception handling.
3220 We call __terminate if there are no handlers left. Otherwise we run the
3221 cleanup actions off the dynamic cleanup stack, and pop the top of the
3222 dynamic handler chain, and use longjmp to transfer back to the associated
3228 struct eh_context
*eh
= (*get_eh_context
) ();
3229 void ***dhc
= &eh
->dynamic_handler_chain
;
3231 void (*func
)(void *, int);
3233 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3234 void ***cleanup
= (void***)&(*dhc
)[1];
3236 /* If there are any cleanups in the chain, run them now. */
3240 void **buf
= (void**)store
;
3245 #ifdef DONT_USE_BUILTIN_SETJMP
3246 if (! setjmp (&buf
[2]))
3248 if (! __builtin_setjmp (&buf
[2]))
3254 func
= (void(*)(void*, int))cleanup
[0][1];
3255 arg
= (void*)cleanup
[0][2];
3257 /* Update this before running the cleanup. */
3258 cleanup
[0] = (void **)cleanup
[0][0];
3271 /* We must call terminate if we try and rethrow an exception, when
3272 there is no exception currently active and when there are no
3274 if (! eh
->info
|| (*dhc
)[0] == 0)
3277 /* Find the jmpbuf associated with the top element of the dynamic
3278 handler chain. The jumpbuf starts two words into the buffer. */
3279 jmpbuf
= &(*dhc
)[2];
3281 /* Then we pop the top element off the dynamic handler chain. */
3282 *dhc
= (void**)(*dhc
)[0];
3284 /* And then we jump to the handler. */
3286 #ifdef DONT_USE_BUILTIN_SETJMP
3287 longjmp (jmpbuf
, 1);
3289 __builtin_longjmp (jmpbuf
, 1);
3293 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3294 handler, then pop the handler off the dynamic handler stack, and
3295 then throw. This is used to skip the first handler, and transfer
3296 control to the next handler in the dynamic handler stack. */
3299 __sjpopnthrow (void)
3301 struct eh_context
*eh
= (*get_eh_context
) ();
3302 void ***dhc
= &eh
->dynamic_handler_chain
;
3303 void (*func
)(void *, int);
3305 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3306 void ***cleanup
= (void***)&(*dhc
)[1];
3308 /* If there are any cleanups in the chain, run them now. */
3312 void **buf
= (void**)store
;
3317 #ifdef DONT_USE_BUILTIN_SETJMP
3318 if (! setjmp (&buf
[2]))
3320 if (! __builtin_setjmp (&buf
[2]))
3326 func
= (void(*)(void*, int))cleanup
[0][1];
3327 arg
= (void*)cleanup
[0][2];
3329 /* Update this before running the cleanup. */
3330 cleanup
[0] = (void **)cleanup
[0][0];
3343 /* Then we pop the top element off the dynamic handler chain. */
3344 *dhc
= (void**)(*dhc
)[0];
3349 /* Support code for all exception region-based exception handling. */
3352 __eh_rtime_match (void *rtime
)
3355 __eh_matcher matcher
;
3358 info
= *(__get_eh_info ());
3359 matcher
= ((__eh_info
*)info
)->match_function
;
3362 #ifndef inhibit_libc
3363 fprintf (stderr
, "Internal Compiler Bug: No runtime type matcher.");
3367 ret
= (*matcher
) (info
, rtime
, (void *)0);
3368 return (ret
!= NULL
);
3371 /* This value identifies the place from which an exception is being
3374 #ifdef EH_TABLE_LOOKUP
3380 #ifdef DWARF2_UNWIND_INFO
3382 /* Return the table version of an exception descriptor */
3385 __get_eh_table_version (exception_descriptor
*table
)
3387 return table
->lang
.version
;
3390 /* Return the originating table language of an exception descriptor */
3393 __get_eh_table_language (exception_descriptor
*table
)
3395 return table
->lang
.language
;
3398 /* This routine takes a PC and a pointer to the exception region TABLE for
3399 its translation unit, and returns the address of the exception handler
3400 associated with the closest exception table handler entry associated
3401 with that PC, or 0 if there are no table entries the PC fits in.
3403 In the advent of a tie, we have to give the last entry, as it represents
3407 old_find_exception_handler (void *pc
, old_exception_table
*table
)
3414 /* We can't do a binary search because the table isn't guaranteed
3415 to be sorted from function to function. */
3416 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
3418 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
3420 /* This can apply. Make sure it is at least as small as
3421 the previous best. */
3422 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
3423 && table
[pos
].start_region
>= table
[best
].start_region
))
3426 /* But it is sorted by starting PC within a function. */
3427 else if (best
>= 0 && table
[pos
].start_region
> pc
)
3431 return table
[best
].exception_handler
;
3437 /* find_exception_handler finds the correct handler, if there is one, to
3438 handle an exception.
3439 returns a pointer to the handler which controlled should be transferred
3440 to, or NULL if there is nothing left.
3442 PC - pc where the exception originates. If this is a rethrow,
3443 then this starts out as a pointer to the exception table
3444 entry we wish to rethrow out of.
3445 TABLE - exception table for the current module.
3446 EH_INFO - eh info pointer for this exception.
3447 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3448 CLEANUP - returned flag indicating whether this is a cleanup handler.
3451 find_exception_handler (void *pc
, exception_descriptor
*table
,
3452 __eh_info
*eh_info
, int rethrow
, int *cleanup
)
3455 void *retval
= NULL
;
3460 /* The new model assumed the table is sorted inner-most out so the
3461 first region we find which matches is the correct one */
3463 exception_table
*tab
= &(table
->table
[0]);
3465 /* Subtract 1 from the PC to avoid hitting the next region */
3468 /* pc is actually the region table entry to rethrow out of */
3469 pos
= ((exception_table
*) pc
) - tab
;
3470 pc
= ((exception_table
*) pc
)->end_region
- 1;
3472 /* The label is always on the LAST handler entry for a region,
3473 so we know the next entry is a different region, even if the
3474 addresses are the same. Make sure its not end of table tho. */
3475 if (tab
[pos
].start_region
!= (void *) -1)
3481 /* We can't do a binary search because the table is in inner-most
3482 to outermost address ranges within functions */
3483 for ( ; tab
[pos
].start_region
!= (void *) -1; pos
++)
3485 if (tab
[pos
].start_region
<= pc
&& tab
[pos
].end_region
> pc
)
3487 if (tab
[pos
].match_info
)
3489 __eh_matcher matcher
= eh_info
->match_function
;
3490 /* match info but no matcher is NOT a match */
3493 void *ret
= (*matcher
)((void *) eh_info
,
3494 tab
[pos
].match_info
, table
);
3498 retval
= tab
[pos
].exception_handler
;
3507 retval
= tab
[pos
].exception_handler
;
3514 #endif /* DWARF2_UNWIND_INFO */
3515 #endif /* EH_TABLE_LOOKUP */
3517 #ifdef DWARF2_UNWIND_INFO
3518 /* Support code for exception handling using static unwind information. */
3522 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3523 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3524 avoid a warning about casting between int and pointer of different
3527 typedef int ptr_type
__attribute__ ((mode (pointer
)));
3531 word_type
*reg
[DWARF_FRAME_REGISTERS
];
3534 #ifdef INCOMING_REGNO
3535 /* Is the saved value for register REG in frame UDATA stored in a register
3536 window in the previous frame? */
3538 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3539 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3540 compiled functions won't work with the frame-unwind stuff here.
3541 Perhaps the entireity of in_reg_window should be conditional on having
3542 seen a DW_CFA_GNU_window_save? */
3543 #define target_flags 0
3546 in_reg_window (int reg
, frame_state
*udata
)
3548 if (udata
->saved
[reg
] == REG_SAVED_REG
)
3549 return INCOMING_REGNO (reg
) == reg
;
3550 if (udata
->saved
[reg
] != REG_SAVED_OFFSET
)
3553 #ifdef STACK_GROWS_DOWNWARD
3554 return udata
->reg_or_offset
[reg
] > 0;
3556 return udata
->reg_or_offset
[reg
] < 0;
3561 in_reg_window (int reg
__attribute__ ((__unused__
)),
3562 frame_state
*udata
__attribute__ ((__unused__
)))
3566 #endif /* INCOMING_REGNO */
3568 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3569 frame called by UDATA or 0. */
3572 get_reg_addr (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3574 while (udata
->saved
[reg
] == REG_SAVED_REG
)
3576 reg
= udata
->reg_or_offset
[reg
];
3577 if (in_reg_window (reg
, udata
))
3583 if (udata
->saved
[reg
] == REG_SAVED_OFFSET
)
3584 return (word_type
*)(udata
->cfa
+ udata
->reg_or_offset
[reg
]);
3586 /* We don't have a saved copy of this register. */
3590 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3591 frame called by UDATA or 0. */
3593 static inline void *
3594 get_reg (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3596 return (void *)(ptr_type
) *get_reg_addr (reg
, udata
, sub_udata
);
3599 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3602 put_reg (unsigned reg
, void *val
, frame_state
*udata
)
3604 *get_reg_addr (reg
, udata
, NULL
) = (word_type
)(ptr_type
) val
;
3607 /* Copy the saved value for register REG from PTREG to frame
3608 TARGET_UDATA. Unlike the previous two functions, this can handle
3609 registers that are not one word large. */
3612 copy_reg (unsigned reg
, word_type
*preg
, frame_state
*target_udata
)
3614 word_type
*ptreg
= get_reg_addr (reg
, target_udata
, NULL
);
3615 memcpy (ptreg
, preg
, dwarf_reg_size_table
[reg
]);
3618 /* Retrieve the return address for frame UDATA. */
3620 static inline void *
3621 get_return_addr (frame_state
*udata
, frame_state
*sub_udata
)
3623 return __builtin_extract_return_addr
3624 (get_reg (udata
->retaddr_column
, udata
, sub_udata
));
3627 /* Overwrite the return address for frame UDATA with VAL. */
3630 put_return_addr (void *val
, frame_state
*udata
)
3632 val
= __builtin_frob_return_addr (val
);
3633 put_reg (udata
->retaddr_column
, val
, udata
);
3636 /* Given the current frame UDATA and its return address PC, return the
3637 information about the calling frame in CALLER_UDATA and update the
3638 register array in SAVED_REGS. */
3641 next_stack_level (void *pc
, frame_state
*udata
, frame_state
*caller_udata
,
3642 saved_regs_t
*saved_regs
)
3647 /* Collect all of the registers for the current frame. */
3648 for (i
= 0; i
< DWARF_FRAME_REGISTERS
; i
++)
3649 if (udata
->saved
[i
])
3650 saved_regs
->reg
[i
] = get_reg_addr (i
, udata
, caller_udata
);
3652 caller_udata
= __frame_state_for (pc
, caller_udata
);
3656 /* Now go back to our caller's stack frame. If our caller's CFA was
3657 saved in a register in this stack frame or a previous one, restore it;
3658 otherwise, assume CFA register is SP and restore it to our CFA value
3659 (which is defined to be the value of SP in the caller's frame). */
3661 p
= saved_regs
->reg
[caller_udata
->cfa_reg
];
3663 caller_udata
->cfa
= (void *)(ptr_type
)*p
;
3665 caller_udata
->cfa
= udata
->cfa
;
3667 if (caller_udata
->indirect
)
3668 caller_udata
->cfa
= * (void **) ((unsigned char *)caller_udata
->cfa
3669 + caller_udata
->base_offset
);
3670 caller_udata
->cfa
+= caller_udata
->cfa_offset
;
3672 return caller_udata
;
3675 /* Hook to call before __terminate if only cleanup handlers remain. */
3677 __unwinding_cleanup (void)
3681 /* throw_helper performs some of the common grunt work for a throw. This
3682 routine is called by throw and rethrows. This is pretty much split
3683 out from the old __throw routine. An addition has been added which allows
3684 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3685 but cleanups remaining. This allows a debugger to examine the state
3686 at which the throw was executed, before any cleanups, rather than
3687 at the terminate point after the stack has been unwound.
3689 EH is the current eh_context structure.
3690 PC is the address of the call to __throw.
3691 MY_UDATA is the unwind information for __throw.
3692 OFFSET_P is where we return the SP adjustment offset. */
3695 throw_helper (struct eh_context
*eh
, void *pc
, frame_state
*my_udata
,
3698 frame_state ustruct2
, *udata
= &ustruct2
;
3699 frame_state ustruct
;
3700 frame_state
*sub_udata
= &ustruct
;
3701 void *saved_pc
= pc
;
3703 void *handler_p
= 0;
3705 void *callee_cfa
= 0;
3706 frame_state saved_ustruct
;
3709 int only_cleanup
= 0;
3711 int saved_state
= 0;
3713 saved_regs_t saved_regs
, cleanup_regs
;
3714 __eh_info
*eh_info
= (__eh_info
*)eh
->info
;
3717 memset (saved_regs
.reg
, 0, sizeof saved_regs
.reg
);
3718 memset (sub_udata
->saved
, REG_UNSAVED
, sizeof sub_udata
->saved
);
3720 /* Do we find a handler based on a re-throw PC? */
3721 if (eh
->table_index
!= (void *) 0)
3724 memcpy (udata
, my_udata
, sizeof (*udata
));
3726 handler
= (void *) 0;
3729 frame_state
*p
= udata
;
3731 udata
= next_stack_level (pc
, udata
, sub_udata
, &saved_regs
);
3734 /* If we couldn't find the next frame, we lose. */
3738 if (udata
->eh_ptr
== NULL
)
3741 new_eh_model
= (((exception_descriptor
*)(udata
->eh_ptr
))->
3742 runtime_id_field
== NEW_EH_RUNTIME
);
3747 handler
= find_exception_handler (eh
->table_index
, udata
->eh_ptr
,
3748 eh_info
, 1, &cleanup
);
3749 eh
->table_index
= (void *)0;
3753 handler
= find_exception_handler (pc
, udata
->eh_ptr
, eh_info
,
3756 handler
= old_find_exception_handler (pc
, udata
->eh_ptr
);
3758 /* If we found one, we can stop searching, if its not a cleanup.
3759 for cleanups, we save the state, and keep looking. This allows
3760 us to call a debug hook if there are nothing but cleanups left. */
3763 /* sub_udata now refers to the frame called by the handler frame. */
3769 saved_ustruct
= *udata
;
3770 cleanup_regs
= saved_regs
;
3771 handler_p
= handler
;
3775 /* Save the CFA of the frame called by the handler
3777 callee_cfa
= sub_udata
->cfa
;
3784 callee_cfa
= sub_udata
->cfa
;
3789 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3790 hitting the beginning of the next region. */
3791 pc
= get_return_addr (udata
, sub_udata
) - 1;
3796 udata
= &saved_ustruct
;
3797 saved_regs
= cleanup_regs
;
3798 handler
= handler_p
;
3801 __unwinding_cleanup ();
3804 /* If we haven't found a handler by now, this is an unhandled
3809 eh
->handler_label
= handler
;
3811 args_size
= udata
->args_size
;
3813 /* We adjust SP by the difference between __throw's CFA and the CFA for
3814 the frame called by the handler frame, because those CFAs correspond
3815 to the SP values at the two call sites. We need to further adjust by
3816 the args_size of the handler frame itself to get the handler frame's
3817 SP from before the args were pushed for that call. */
3818 #ifdef STACK_GROWS_DOWNWARD
3819 *offset_p
= callee_cfa
- my_udata
->cfa
+ args_size
;
3821 *offset_p
= my_udata
->cfa
- callee_cfa
- args_size
;
3824 /* If we found a handler in the throw context there's no need to
3828 /* Copy saved register values into our register save slots. */
3829 for (i
= 0; i
< DWARF_FRAME_REGISTERS
; i
++)
3830 if (i
!= udata
->retaddr_column
&& saved_regs
.reg
[i
])
3831 copy_reg (i
, saved_regs
.reg
[i
], my_udata
);
3838 /* We first search for an exception handler, and if we don't find
3839 it, we call __terminate on the current stack frame so that we may
3840 use the debugger to walk the stack and understand why no handler
3843 If we find one, then we unwind the frames down to the one that
3844 has the handler and transfer control into the handler. */
3846 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
3851 struct eh_context
*eh
= (*get_eh_context
) ();
3855 /* XXX maybe make my_ustruct static so we don't have to look it up for
3857 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3859 /* This is required for C++ semantics. We must call terminate if we
3860 try and rethrow an exception, when there is no exception currently
3865 /* Start at our stack frame. */
3867 my_udata
= __frame_state_for (&&label
, my_udata
);
3871 /* We need to get the value from the CFA register. */
3872 my_udata
->cfa
= __builtin_dwarf_cfa ();
3874 /* Do any necessary initialization to access arbitrary stack frames.
3875 On the SPARC, this means flushing the register windows. */
3876 __builtin_unwind_init ();
3878 /* Now reset pc to the right throw point. The return address points to
3879 the instruction after the call to __throw; we subtract 1 so that pc
3880 points into the call insn itself. Since we work with PC ranges (as
3881 opposed to specific call sites), it isn't important for it to point to
3882 the very beginning of the call insn, and making it do so would be
3883 hard on targets with variable length call insns. */
3884 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3886 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
3890 __builtin_eh_return ((void *)eh
, offset
, handler
);
3892 /* Epilogue: restore the handler frame's register values and return
3896 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
3899 __rethrow (void *index
)
3901 struct eh_context
*eh
= (*get_eh_context
) ();
3905 /* XXX maybe make my_ustruct static so we don't have to look it up for
3907 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3909 /* This is required for C++ semantics. We must call terminate if we
3910 try and rethrow an exception, when there is no exception currently
3915 /* This is the table index we want to rethrow from. The value of
3916 the END_REGION label is used for the PC of the throw, and the
3917 search begins with the next table entry. */
3918 eh
->table_index
= index
;
3920 /* Start at our stack frame. */
3922 my_udata
= __frame_state_for (&&label
, my_udata
);
3926 /* We need to get the value from the CFA register. */
3927 my_udata
->cfa
= __builtin_dwarf_cfa ();
3929 /* Do any necessary initialization to access arbitrary stack frames.
3930 On the SPARC, this means flushing the register windows. */
3931 __builtin_unwind_init ();
3933 /* Now reset pc to the right throw point. The return address points to
3934 the instruction after the call to __throw; we subtract 1 so that pc
3935 points into the call insn itself. Since we work with PC ranges (as
3936 opposed to specific call sites), it isn't important for it to point to
3937 the very beginning of the call insn, and making it do so would be
3938 hard on targets with variable length call insns. */
3939 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3941 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
3945 __builtin_eh_return ((void *)eh
, offset
, handler
);
3947 /* Epilogue: restore the handler frame's register values and return
3950 #endif /* DWARF2_UNWIND_INFO */
3952 #ifdef IA64_UNWIND_INFO
3955 /* Return handler to which we want to transfer control, NULL if we don't
3956 intend to handle this exception here. */
3958 __ia64_personality_v1 (void *pc
, old_exception_table
*table
)
3965 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
3967 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
3969 /* This can apply. Make sure it is at least as small as
3970 the previous best. */
3971 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
3972 && table
[pos
].start_region
>= table
[best
].start_region
))
3975 /* It is sorted by starting PC within a function. */
3976 else if (best
>= 0 && table
[pos
].start_region
> pc
)
3980 return table
[best
].exception_handler
;
3986 ia64_throw_helper (ia64_frame_state
*throw_frame
, ia64_frame_state
*caller
,
3987 void *throw_bsp
, void *throw_sp
)
3989 void *throw_pc
= __builtin_return_address (0);
3990 unwind_info_ptr
*info
;
3991 void *pc
, *handler
= NULL
;
3996 __builtin_ia64_flushrs (); /* Make the local register stacks available. */
3998 /* Start at our stack frame, get our state. */
3999 __build_ia64_frame_state (throw_pc
, throw_frame
, throw_bsp
, throw_sp
,
4002 /* Now we have to find the proper frame for pc, and see if there
4003 is a handler for it. if not, we keep going back frames until
4004 we do find one. Otherwise we call uncaught (). */
4007 memcpy (caller
, throw_frame
, sizeof (*caller
));
4010 void *(*personality
) (void *, old_exception_table
*);
4014 /* We only care about the RP right now, so we dont need to keep
4015 any other information about a call frame right now. */
4016 pc
= __get_real_reg_value (&caller
->rp
) - 1;
4017 bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&caller
->pfs
),
4019 info
= __build_ia64_frame_state (pc
, caller
, bsp
, caller
->my_psp
,
4022 /* If we couldn't find the next frame, we lose. */
4026 personality
= __get_personality (info
);
4027 /* TODO Haven't figured out how to actually load the personality address
4028 yet, so just always default to the one we expect for now. */
4029 if (personality
!= 0)
4030 personality
= __ia64_personality_v1
;
4031 eh_table
= __get_except_table (info
);
4032 /* If there is no personality routine, we'll keep unwinding. */
4034 /* Pass a segment relative PC address to the personality routine,
4035 because the unwind_info section uses segrel relocs. */
4036 handler
= personality ((void *)(pc
- pc_base
), eh_table
);
4042 /* Handler is a segment relative address, so we must adjust it here. */
4043 handler
+= (long) pc_base
;
4045 /* If we found a handler, we need to unwind the stack to that point.
4046 We do this by copying saved values from previous frames into the
4047 save slot for the throw_frame saved slots. when __throw returns,
4048 it'll pickup the correct values. */
4050 /* Start with where __throw saved things, and copy each saved register
4051 of each previous frame until we get to the one before we're
4052 throwing back to. */
4053 memcpy (caller
, throw_frame
, sizeof (*caller
));
4054 for ( ; frame_count
> 0; frame_count
--)
4056 pc
= __get_real_reg_value (&caller
->rp
) - 1;
4057 bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&caller
->pfs
),
4059 __build_ia64_frame_state (pc
, caller
, bsp
, caller
->my_psp
, &pc_base
);
4060 /* Any regs that were saved can be put in the throw frame now. */
4061 /* We don't want to copy any saved register from the
4062 target destination, but we do want to load up it's frame. */
4063 if (frame_count
> 1)
4064 __copy_saved_reg_state (throw_frame
, caller
);
4067 /* Set return address of the throw frame to the handler. */
4068 __set_real_reg_value (&throw_frame
->rp
, handler
);
4070 /* TODO, do we need to do anything to make the values we wrote 'stick'? */
4071 /* DO we need to go through the whole loadrs seqeunce? */
4078 register void *stack_pointer
__asm__("r12");
4079 struct eh_context
*eh
= (*get_eh_context
) ();
4080 ia64_frame_state my_frame
;
4081 ia64_frame_state originator
; /* For the context handler is in. */
4082 void *bsp
, *tmp_bsp
;
4085 /* This is required for C++ semantics. We must call terminate if we
4086 try and rethrow an exception, when there is no exception currently
4091 __builtin_unwind_init ();
4093 /* We have to call another routine to actually process the frame
4094 information, which will force all of __throw's local registers into
4097 /* Get the value of ar.bsp while we're here. */
4099 bsp
= __builtin_ia64_bsp ();
4100 ia64_throw_helper (&my_frame
, &originator
, bsp
, stack_pointer
);
4102 /* Now we have to fudge the bsp by the amount in our (__throw)
4103 frame marker, since the return is going to adjust it by that much. */
4105 tmp_bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&my_frame
.pfs
),
4107 offset
= (char *)my_frame
.my_bsp
- (char *)tmp_bsp
;
4108 tmp_bsp
= (char *)originator
.my_bsp
+ offset
;
4110 __builtin_eh_return (tmp_bsp
, offset
, originator
.my_sp
);
4112 /* The return address was already set by throw_helper. */
4115 #endif /* IA64_UNWIND_INFO */