/* More subroutines needed by GCC output code on some machines.  */
/* Compile this one with gcc.  */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001  Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* It is incorrect to include config.h here, because this file is being
   compiled for the target, and hence definitions concerning only the host
   do not apply.  */

/* Don't use `fancy_abort' here even if config.h says to use it.  */
#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
#if defined (L_divdi3) || defined (L_moddi3)
  w.s.high = -uu.s.high - ((UWtype) w.s.low > 0);
__addvsi3 (Wtype a, Wtype b)
  if (b >= 0 ? w < a : w > a)
__addvdi3 (DWtype a, DWtype b)
  if (b >= 0 ? w < a : w > a)
__subvsi3 (Wtype a, Wtype b)
  return __addvsi3 (a, (-b));
  if (b >= 0 ? w > a : w < a)
__subvdi3 (DWtype a, DWtype b)
  if (b >= 0 ? w > a : w < a)
__mulvsi3 (Wtype a, Wtype b)
  if (((a >= 0) == (b >= 0)) ? w < 0 : w > 0)
  if (a >= 0 ? w > 0 : w < 0)
  if (a >= 0 ? w > 0 : w < 0)
__mulvdi3 (DWtype u, DWtype v)
  if (((u >= 0) == (v >= 0)) ? w < 0 : w > 0)
/* Unless shift functions are defined with full ANSI prototypes,
   parameter b will be promoted to int if word_type is smaller than an int.  */
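/* A minimal illustration (hypothetical helper, not part of libgcc) of the
   word-at-a-time decomposition used by __lshrdi3, __ashldi3 and __ashrdi3
   below, written for a fixed 32-bit word so the two cases are easy to see.  */
#if 0
static unsigned long long
lshrdi_example (unsigned long long u, int b)
{
  unsigned int high = (unsigned int) (u >> 32);
  unsigned int low = (unsigned int) u;
  int bm = 32 - b;

  if (b == 0)
    return u;
  if (bm <= 0)
    {
      /* Shift count covers a whole word: the high word supplies everything.  */
      low = high >> -bm;
      high = 0;
    }
  else
    {
      /* Bits shifted out of the high word carry into the low word.  */
      unsigned int carries = high << bm;
      high = high >> b;
      low = (low >> b) | carries;
    }
  return ((unsigned long long) high << 32) | low;
}
#endif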
__lshrdi3 (DWtype u, word_type b)
  bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
      w.s.low = (UWtype) uu.s.high >> -bm;
      UWtype carries = (UWtype) uu.s.high << bm;
      w.s.high = (UWtype) uu.s.high >> b;
      w.s.low = ((UWtype) uu.s.low >> b) | carries;
__ashldi3 (DWtype u, word_type b)
  bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
      w.s.high = (UWtype) uu.s.low << -bm;
      UWtype carries = (UWtype) uu.s.low >> bm;
      w.s.low = (UWtype) uu.s.low << b;
      w.s.high = ((UWtype) uu.s.high << b) | carries;
__ashrdi3 (DWtype u, word_type b)
  bm = (sizeof (Wtype) * BITS_PER_UNIT) - b;
      /* w.s.high = 1..1 or 0..0 */
      w.s.high = uu.s.high >> (sizeof (Wtype) * BITS_PER_UNIT - 1);
      w.s.low = uu.s.high >> -bm;
      UWtype carries = (UWtype) uu.s.high << bm;
      w.s.high = uu.s.high >> b;
      w.s.low = ((UWtype) uu.s.low >> b) | carries;
  UWtype word, count, add;
    word = uu.s.low, add = 0;
  else if (uu.s.high != 0)
    word = uu.s.high, add = BITS_PER_UNIT * sizeof (Wtype);
  count_trailing_zeros (count, word);
  return count + add + 1;
__muldi3 (DWtype u, DWtype v)
  w.ll = __umulsidi3 (uu.s.low, vv.s.low);
  w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
               + (UWtype) uu.s.high * (UWtype) vv.s.low);
#if defined (sdiv_qrnnd)
__udiv_w_sdiv (UWtype *rp, UWtype a1, UWtype a0, UWtype d)
      if (a1 < d - a1 - (a0 >> (W_TYPE_SIZE - 1)))
          /* dividend, divisor, and quotient are nonnegative */
          sdiv_qrnnd (q, r, a1, a0, d);
          /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
          sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (W_TYPE_SIZE - 1));
          /* Divide (c1*2^32 + c0) by d */
          sdiv_qrnnd (q, r, c1, c0, d);
          /* Add 2^31 to quotient */
          q += (UWtype) 1 << (W_TYPE_SIZE - 1);
      b1 = d >> 1;                      /* d/2, between 2^30 and 2^31 - 1 */
      c1 = a1 >> 1;                     /* A/2 */
      c0 = (a1 << (W_TYPE_SIZE - 1)) + (a0 >> 1);

      if (a1 < b1)                      /* A < 2^32*b1, so A/2 < 2^31*b1 */
          sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
          r = 2*r + (a0 & 1);           /* Remainder from A/(2*b1) */
      else if (c1 < b1)                 /* So 2^31 <= (A/2)/b1 < 2^32 */
          c0 = ~c0;                     /* logical NOT */
          sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
          q = ~q;                       /* (A/2)/b1 */
          r = 2*r + (a0 & 1);           /* A/(2*b1) */
      else                              /* Implies c1 = b1 */
        {                               /* Hence a1 = d - 1 = 2*b1 - 1 */
/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv.  */
__udiv_w_sdiv (UWtype *rp __attribute__ ((__unused__)),
               UWtype a1 __attribute__ ((__unused__)),
               UWtype a0 __attribute__ ((__unused__)),
               UWtype d __attribute__ ((__unused__)))
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3))
const UQItype __clz_tab[] =
{
  0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
};
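/* __clz_tab[b] gives the number of significant bits in the byte value b
   (0 for 0, 1 for 1, ..., 8 for 128..255).  A hedged sketch of how a
   table-driven count_leading_zeros can be built from it, assuming a 32-bit
   word; the real macro lives in longlong.h.  */
#if 0
static int
clz_example (unsigned int x)    /* x must be non-zero */
{
  int shift;

  if (x >= (unsigned int) 1 << 24)
    shift = 24;
  else if (x >= (unsigned int) 1 << 16)
    shift = 16;
  else if (x >= (unsigned int) 1 << 8)
    shift = 8;
  else
    shift = 0;

  /* Bits above the most significant set bit of x.  */
  return 32 - (__clz_tab[x >> shift] + shift);
}
#endif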
#if (defined (L_udivdi3) || defined (L_divdi3) || \
     defined (L_umoddi3) || defined (L_moddi3))
__udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
  UWtype d0, d1, n0, n1, n2;
#if !UDIV_NEEDS_NORMALIZATION
          udiv_qrnnd (q0, n0, n1, n0, d0);
          /* Remainder in n0.  */
            d0 = 1 / d0;        /* Divide intentionally by zero.  */
          udiv_qrnnd (q1, n1, 0, n1, d0);
          udiv_qrnnd (q0, n0, n1, n0, d0);
          /* Remainder in n0.  */
#else /* UDIV_NEEDS_NORMALIZATION */
          count_leading_zeros (bm, d0);
              /* Normalize, i.e. make the most significant bit of the
                 denominator set.  */
              n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
          udiv_qrnnd (q0, n0, n1, n0, d0);
          /* Remainder in n0 >> bm.  */
            d0 = 1 / d0;        /* Divide intentionally by zero.  */
          count_leading_zeros (bm, d0);
              /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
                 conclude (the most significant bit of n1 is set) /\ (the
                 leading quotient digit q1 = 1).

                 This special case is necessary, not an optimization.
                 (Shift counts of W_TYPE_SIZE are undefined.)  */
              b = W_TYPE_SIZE - bm;
              n1 = (n1 << bm) | (n0 >> b);
              udiv_qrnnd (q1, n1, n2, n1, d0);
          udiv_qrnnd (q0, n0, n1, n0, d0);
          /* Remainder in n0 >> bm.  */
#endif /* UDIV_NEEDS_NORMALIZATION */
      /* Remainder in n1n0.  */
          count_leading_zeros (bm, d1);
              /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
                 conclude (the most significant bit of n1 is set) /\ (the
                 quotient digit q0 = 0 or 1).

                 This special case is necessary, not an optimization.  */
              /* The condition on the next line takes advantage of that
                 n1 >= d1 (true due to program flow).  */
              if (n1 > d1 || n0 >= d0)
                  sub_ddmmss (n1, n0, n1, n0, d1, d0);
              b = W_TYPE_SIZE - bm;
              d1 = (d1 << bm) | (d0 >> b);
              n1 = (n1 << bm) | (n0 >> b);
              udiv_qrnnd (q0, n1, n2, n1, d1);
              umul_ppmm (m1, m0, q0, d0);
              if (m1 > n1 || (m1 == n1 && m0 > n0))
                  sub_ddmmss (m1, m0, m1, m0, d1, d0);
          /* Remainder in (n1n0 - m1m0) >> bm.  */
              sub_ddmmss (n1, n0, n1, n0, m1, m0);
              rr.s.low = (n1 << b) | (n0 >> bm);
              rr.s.high = n1 >> bm;
__divdi3 (DWtype u, DWtype v)
      uu.ll = __negdi2 (uu.ll);
      vv.ll = __negdi2 (vv.ll);
  w = __udivmoddi4 (uu.ll, vv.ll, (UDWtype *) 0);
__moddi3 (DWtype u, DWtype v)
      uu.ll = __negdi2 (uu.ll);
      vv.ll = __negdi2 (vv.ll);
  (void) __udivmoddi4 (uu.ll, vv.ll, &w);
__umoddi3 (UDWtype u, UDWtype v)
  (void) __udivmoddi4 (u, v, &w);
__udivdi3 (UDWtype n, UDWtype d)
  return __udivmoddi4 (n, d, (UDWtype *) 0);
__cmpdi2 (DWtype a, DWtype b)
  au.ll = a, bu.ll = b;
  if (au.s.high < bu.s.high)
  else if (au.s.high > bu.s.high)
  if ((UWtype) au.s.low < (UWtype) bu.s.low)
  else if ((UWtype) au.s.low > (UWtype) bu.s.low)
__ucmpdi2 (DWtype a, DWtype b)
  au.ll = a, bu.ll = b;
  if ((UWtype) au.s.high < (UWtype) bu.s.high)
  else if ((UWtype) au.s.high > (UWtype) bu.s.high)
  if ((UWtype) au.s.low < (UWtype) bu.s.low)
  else if ((UWtype) au.s.low > (UWtype) bu.s.low)
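/* Both comparison helpers return 0 when a < b, 1 when a == b and 2 when
   a > b.  A small usage sketch (hypothetical caller, not part of libgcc):  */
#if 0
static const char *
describe_order (DWtype a, DWtype b)
{
  switch (__cmpdi2 (a, b))
    {
    case 0:
      return "less";
    case 1:
      return "equal";
    default:
      return "greater";
    }
}
#endif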
#if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
__fixunstfDI (TFtype a)
  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DWtype!),
     and shift it into the high word.  */
  /* Remove high part from the TFtype, leaving the low part as flonum.  */
  /* Convert that to fixed (but not to DWtype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
#if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
    return - __fixunstfDI (-a);
  return __fixunstfDI (a);
#if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
__fixunsxfDI (XFtype a)
  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DWtype!),
     and shift it into the high word.  */
  /* Remove high part from the XFtype, leaving the low part as flonum.  */
  /* Convert that to fixed (but not to DWtype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
#if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
    return - __fixunsxfDI (-a);
  return __fixunsxfDI (a);
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
__fixunsdfDI (DFtype a)
  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DWtype!),
     and shift it into the high word.  */
  /* Remove high part from the DFtype, leaving the low part as flonum.  */
  /* Convert that to fixed (but not to DWtype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
    return - __fixunsdfDI (-a);
  return __fixunsdfDI (a);
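/* The conversion bodies above survive only as comments here, so this is a
   hedged sketch (not the exact libgcc code) of the scheme those comments
   describe: split at 2^WORD_SIZE, convert each half through a word-sized
   integer, and let a possibly negative low part correct the high part.  */
#if 0
static UDWtype
fixuns_example (DFtype a)
{
  DFtype b;
  UDWtype v;

  if (a < 0)
    return 0;

  b = a / HIGH_WORD_COEFF;     /* high word of the result, still a flonum */
  v = (UWtype) b;              /* convert to fixed (word-sized, not DWtype) */
  v <<= WORD_SIZE;             /* shift it into the high word */
  a -= (DFtype) v;             /* leave only the low part as a flonum */
  if (a < 0)                   /* the low part can come out negative */
    v -= (UWtype) (-a);
  else
    v += (UWtype) a;
  return v;
}
#endif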
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
__fixunssfDI (SFtype original_a)
  /* Convert the SFtype to a DFtype, because that is surely not going
     to lose any bits.  Some day someone else can write a faster version
     that avoids converting to DFtype, and verify it really works right.  */
  DFtype a = original_a;
  /* Compute high word of result, as a flonum.  */
  b = (a / HIGH_WORD_COEFF);
  /* Convert that to fixed (but not to DWtype!),
     and shift it into the high word.  */
  /* Remove high part from the DFtype, leaving the low part as flonum.  */
  /* Convert that to fixed (but not to DWtype!) and add it in.
     Sometimes A comes out negative.  This is significant, since
     A has more bits than a long int does.  */
__fixsfdi (SFtype a)
    return - __fixunssfDI (-a);
  return __fixunssfDI (a);
#if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
__floatdixf (DWtype u)
  d = (Wtype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
#if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
__floatditf (DWtype u)
  d = (Wtype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
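/* Note (editorial): the two multiplications by HIGH_HALFWORD_COEFF in the
   __floatdi* bodies scale the high word by 2^(WORD_SIZE/2) twice, i.e. by
   2^WORD_SIZE in total, so it lands in the right binary position before the
   low WORD_SIZE bits are added in.  */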
__floatdidf (DWtype u)
  d = (Wtype) (u >> WORD_SIZE);
  d *= HIGH_HALFWORD_COEFF;
  d *= HIGH_HALFWORD_COEFF;
  d += (UWtype) (u & (HIGH_WORD_COEFF - 1));
#define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
#define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
#define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
#define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)

/* Define codes for all the float formats that we know of.  Note
   that this is copied from real.h.  */

#define UNKNOWN_FLOAT_FORMAT 0
#define IEEE_FLOAT_FORMAT 1
#define VAX_FLOAT_FORMAT 2
#define IBM_FLOAT_FORMAT 3

/* Default to IEEE float if not specified.  Nearly all machines use it.  */
#ifndef HOST_FLOAT_FORMAT
#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
__floatdisf (DWtype u)
  /* Do the calculation in DFmode
     so that we don't lose any of the precision of the high word
     while multiplying it.  */
  /* Protect against double-rounding error.
     Represent any low-order bits, that might be truncated in DFmode,
     by a bit that won't be lost.  The bit can go in anywhere below the
     rounding position of the SFmode.  A fixed mask and bit position
     handles all usual configurations.  It doesn't handle the case
     of 128-bit DImode, however.  */
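  /* Concrete instance (illustrative): with 64-bit DImode, DF_SIZE 53 and
     SF_SIZE 24 the guard below applies (53 < 64 and 53 > 64 - 53 + 24 = 35),
     and REP_BIT becomes (UDWtype) 1 << 11, the lowest bit that survives the
     conversion to DFmode, used as a sticky bit for the 11 bits below it.  */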
  if (DF_SIZE < DI_SIZE
      && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
#define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
      if (! (- ((DWtype) 1 << DF_SIZE) < u
             && u < ((DWtype) 1 << DF_SIZE)))
          if ((UDWtype) u & (REP_BIT - 1))
  f = (Wtype) (u >> WORD_SIZE);
  f *= HIGH_HALFWORD_COEFF;
  f *= HIGH_HALFWORD_COEFF;
  f += (UWtype) (u & (HIGH_WORD_COEFF - 1));
#if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
/* Reenable the normal types, in case limits.h needs them.  */
__fixunsxfSI (XFtype a)
  if (a >= - (DFtype) LONG_MIN)
    return (Wtype) (a + LONG_MIN) - LONG_MIN;
/* Reenable the normal types, in case limits.h needs them.  */
__fixunsdfSI (DFtype a)
  if (a >= - (DFtype) LONG_MIN)
    return (Wtype) (a + LONG_MIN) - LONG_MIN;
/* Reenable the normal types, in case limits.h needs them.  */
__fixunssfSI (SFtype a)
  if (a >= - (SFtype) LONG_MIN)
    return (Wtype) (a + LONG_MIN) - LONG_MIN;

/* From here on down, the routines use normal data types.  */

#define SItype bogus_type
#define USItype bogus_type
#define DItype bogus_type
#define UDItype bogus_type
#define SFtype bogus_type
#define DFtype bogus_type
/* Like bcmp except the sign is meaningful.
   Result is negative if S1 is less than S2,
   positive if S1 is greater, 0 if S1 and S2 are equal.  */
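/* A hedged usage sketch (hypothetical caller, not part of libgcc):  */
#if 0
static int
buffers_equal (const unsigned char *a, const unsigned char *b, size_t n)
{
  /* Only the sign of the result (and zero for equality) is specified.  */
  return __gcc_bcmp (a, b, n) == 0;
}
#endif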
__gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
      unsigned char c1 = *s1++, c2 = *s2++;
#if defined(__svr4__) || defined(__alliant__)
  /* The Alliant needs the added underscore.  */
  asm (".globl __builtin_saveregs");
  asm ("__builtin_saveregs:");
  asm (".globl ___builtin_saveregs");
  asm ("___builtin_saveregs:");

  asm (" andnot 0x0f,%sp,%sp");	/* round down to 16-byte boundary */
  asm (" adds -96,%sp,%sp");	/* allocate stack space for reg save
				   area and also for a new va_list
				   structure */
  /* Save all argument registers in the arg reg save area.  The
     arg reg save area must have the following layout (according

  asm (" fst.q %f8, 0(%sp)");	/* save floating regs (f8-f15) */
  asm (" fst.q %f12,16(%sp)");

  asm (" st.l %r16,32(%sp)");	/* save integer regs (r16-r27) */
  asm (" st.l %r17,36(%sp)");
  asm (" st.l %r18,40(%sp)");
  asm (" st.l %r19,44(%sp)");
  asm (" st.l %r20,48(%sp)");
  asm (" st.l %r21,52(%sp)");
  asm (" st.l %r22,56(%sp)");
  asm (" st.l %r23,60(%sp)");
  asm (" st.l %r24,64(%sp)");
  asm (" st.l %r25,68(%sp)");
  asm (" st.l %r26,72(%sp)");
  asm (" st.l %r27,76(%sp)");

  asm (" adds 80,%sp,%r16");	/* compute the address of the new
				   va_list structure.  Put it into
				   r16 so that it will be returned
				   to the caller.  */

  /* Initialize all fields of the new va_list structure.  This
     structure looks like:

	unsigned long ireg_used;
	unsigned long freg_used;

  asm (" st.l %r0, 0(%r16)");	/* nfixed */
  asm (" st.l %r0, 4(%r16)");	/* nfloating */
  asm (" st.l %sp, 8(%r16)");	/* __va_ctl points to __va_struct.  */
  asm (" bri %r1");		/* delayed return */
  asm (" st.l %r28,12(%r16)");	/* pointer to overflow args */
#else /* not __svr4__ */
#if defined(__PARAGON__)
 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
 * and we stand a better chance of hooking into libraries
 * compiled by PGI.  [andyp@ssd.intel.com]

  asm (".globl __builtin_saveregs");
  asm ("__builtin_saveregs:");
  asm (".globl ___builtin_saveregs");
  asm ("___builtin_saveregs:");

  asm (" andnot 0x0f,sp,sp");	/* round down to 16-byte boundary */
  asm (" adds -96,sp,sp");	/* allocate stack space for reg save
				   area and also for a new va_list
				   structure */
  /* Save all argument registers in the arg reg save area.  The
     arg reg save area must have the following layout (according

  asm (" fst.q f8, 0(sp)");
  asm (" fst.q f12,16(sp)");
  asm (" st.l r16,32(sp)");
  asm (" st.l r17,36(sp)");
  asm (" st.l r18,40(sp)");
  asm (" st.l r19,44(sp)");
  asm (" st.l r20,48(sp)");
  asm (" st.l r21,52(sp)");
  asm (" st.l r22,56(sp)");
  asm (" st.l r23,60(sp)");
  asm (" st.l r24,64(sp)");
  asm (" st.l r25,68(sp)");
  asm (" st.l r26,72(sp)");
  asm (" st.l r27,76(sp)");

  asm (" adds 80,sp,r16");	/* compute the address of the new
				   va_list structure.  Put it into
				   r16 so that it will be returned
				   to the caller.  */

  /* Initialize all fields of the new va_list structure.  This
     structure looks like:

	unsigned long ireg_used;
	unsigned long freg_used;

  asm (" st.l r0, 0(r16)");	/* nfixed */
  asm (" st.l r0, 4(r16)");	/* nfloating */
  asm (" st.l sp, 8(r16)");	/* __va_ctl points to __va_struct.  */
  asm (" bri r1");		/* delayed return */
  asm (" st.l r28,12(r16)");	/* pointer to overflow args */
#else /* not __PARAGON__ */

  asm (".globl ___builtin_saveregs");
  asm ("___builtin_saveregs:");
  asm (" mov sp,r30");
  asm (" andnot 0x0f,sp,sp");
  asm (" adds -96,sp,sp");	/* allocate sufficient space on the stack */

  /* Fill in the __va_struct.  */
  asm (" st.l r16, 0(sp)");	/* save integer regs (r16-r27) */
  asm (" st.l r17, 4(sp)");	/* int fixed[12] */
  asm (" st.l r18, 8(sp)");
  asm (" st.l r19,12(sp)");
  asm (" st.l r20,16(sp)");
  asm (" st.l r21,20(sp)");
  asm (" st.l r22,24(sp)");
  asm (" st.l r23,28(sp)");
  asm (" st.l r24,32(sp)");
  asm (" st.l r25,36(sp)");
  asm (" st.l r26,40(sp)");
  asm (" st.l r27,44(sp)");

  asm (" fst.q f8, 48(sp)");	/* save floating regs (f8-f15) */
  asm (" fst.q f12,64(sp)");	/* int floating[8] */

  /* Fill in the __va_ctl.  */
  asm (" st.l sp, 80(sp)");	/* __va_ctl points to __va_struct.  */
  asm (" st.l r28,84(sp)");	/* pointer to more args */
  asm (" st.l r0, 88(sp)");	/* nfixed */
  asm (" st.l r0, 92(sp)");	/* nfloating */

  asm (" adds 80,sp,r16");	/* return address of the __va_ctl.  */

  asm (" mov r30,sp");
				/* recover stack and pass address to start
				   of data.  */
#endif /* not __PARAGON__ */
#endif /* not __svr4__ */
#else /* not __i860__ */

  asm (".global __builtin_saveregs");
  asm ("__builtin_saveregs:");
  asm (".global ___builtin_saveregs");
  asm ("___builtin_saveregs:");
#ifdef NEED_PROC_COMMAND
  asm ("st %i0,[%fp+68]");
  asm ("st %i1,[%fp+72]");
  asm ("st %i2,[%fp+76]");
  asm ("st %i3,[%fp+80]");
  asm ("st %i4,[%fp+84]");
  asm ("st %i5,[%fp+88]");
#ifdef NEED_TYPE_COMMAND
  asm (".type __builtin_saveregs,#function");
  asm (".size __builtin_saveregs,.-__builtin_saveregs");
#else /* not __sparc__ */
#if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
  asm (" .set nomips16");
  asm (" .ent __builtin_saveregs");
  asm (" .globl __builtin_saveregs");
  asm ("__builtin_saveregs:");
  asm (" sw $4,0($30)");
  asm (" sw $5,4($30)");
  asm (" sw $6,8($30)");
  asm (" sw $7,12($30)");
  asm (" .end __builtin_saveregs");
#else /* not __mips__, etc. */

void * ATTRIBUTE_NORETURN
__builtin_saveregs ()
#endif /* not __mips__ */
#endif /* not __sparc__ */
#endif /* not __i860__ */
#ifndef inhibit_libc

#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch.  */

/* This is used by the `assert' macro.  */
__eprintf (const char *string, const char *expression,
           unsigned int line, const char *filename)
  fprintf (stderr, string, expression, line, filename);

/* Structure emitted by -a  */
  const char *filename;
  const unsigned long *addresses;
  /* Older GCC's did not emit these fields.  */
  const char **functions;
  const long *line_nums;
  const char **filenames;
#ifdef BLOCK_PROFILER_CODE
#ifndef inhibit_libc

/* Simple minded basic block profiling output dumper for
   systems that don't provide tcov support.  At present,
   it requires atexit and stdio.  */

#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch.  */

char *ctime PARAMS ((const time_t *));

#include "gbl-ctors.h"
#include "gcov-io.h"
#ifdef TARGET_HAS_F_SETLKW

static struct bb *bb_head;

static int num_digits (long value, int base) __attribute__ ((const));

/* Return the number of digits needed to print a value */
/* __inline__ */ static int num_digits (long value, int base)
  int minus = (value < 0 && base != 16);
  unsigned long v = (minus) ? -value : value;
__bb_exit_func (void)
  FILE *da_file, *file;
  i = strlen (bb_head->filename) - 3;
  if (!strcmp (bb_head->filename+i, ".da"))
      /* Must be -fprofile-arcs not -a.
         Dump data in a form that gcov expects.  */
      for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
          /* Make sure the output file exists -
             but don't clobber existing data.  */
          if ((da_file = fopen (ptr->filename, "a")) != 0)
          /* Need to re-open in order to be able to write from the start.  */
          da_file = fopen (ptr->filename, "r+b");
          /* Some old systems might not allow the 'b' mode modifier.
             Therefore, try to open without it.  This can lead to a race
             condition so that when you delete and re-create the file, the
             file might be opened in text mode, but then, you shouldn't
             delete the file in the first place.  */
            da_file = fopen (ptr->filename, "r+");
              fprintf (stderr, "arc profiling: Can't open output file %s.\n",
                       ptr->filename);
          /* After a fork, another process might try to read and/or write
             the same file simultaneously.  So if we can, lock the file to
             avoid race conditions.  */
#if defined (TARGET_HAS_F_SETLKW)
            struct flock s_flock;

            s_flock.l_type = F_WRLCK;
            s_flock.l_whence = SEEK_SET;
            s_flock.l_start = 0;
            s_flock.l_pid = getpid ();

            while (fcntl (fileno (da_file), F_SETLKW, &s_flock)
                   && errno == EINTR)
          /* If the file is not empty, and the number of counts in it is the
             same, then merge them in.  */
          firstchar = fgetc (da_file);
          if (firstchar == EOF)
              if (ferror (da_file))
                  fprintf (stderr, "arc profiling: Can't read output file ");
                  perror (ptr->filename);
              if (ungetc (firstchar, da_file) == EOF)
              if (__read_long (&n_counts, da_file, 8) != 0)
                  fprintf (stderr, "arc profiling: Can't read output file %s.\n",
                           ptr->filename);
              if (n_counts == ptr->ncounts)
                  for (i = 0; i < n_counts; i++)
                      if (__read_long (&v, da_file, 8) != 0)
                          fprintf (stderr, "arc profiling: Can't read output file %s.\n",
                                   ptr->filename);
                      ptr->counts[i] += v;
          /* ??? Should first write a header to the file.  Preferably, a 4 byte
             magic number, 4 bytes containing the time the program was
             compiled, 4 bytes containing the last modification time of the
             source file, and 4 bytes indicating the compiler options used.

             That way we can easily verify that the proper source/executable/
             data file combination is being used from gcov.  */

          if (__write_long (ptr->ncounts, da_file, 8) != 0)
              fprintf (stderr, "arc profiling: Error writing output file %s.\n",
                       ptr->filename);
              long *count_ptr = ptr->counts;
              for (j = ptr->ncounts; j > 0; j--)
                  if (__write_long (*count_ptr, da_file, 8) != 0)
            fprintf (stderr, "arc profiling: Error writing output file %s.\n",
                     ptr->filename);
          if (fclose (da_file) == EOF)
            fprintf (stderr, "arc profiling: Error closing output file %s.\n",
                     ptr->filename);

      /* Must be basic block profiling.  Emit a human readable output file.  */
      file = fopen ("bb.out", "a");
          /* This is somewhat type incorrect, but it avoids worrying about
             exactly where time.h is included from.  It should be ok unless
             a void * differs from other pointer formats, or if sizeof (long)
             is < sizeof (time_t).  It would be nice if we could assume the
             use of rational standards here.  */

          time ((void *) &time_value);
          fprintf (file, "Basic block profiling finished on %s\n", ctime ((void *) &time_value));

          /* We check the length field explicitly in order to allow compatibility
             with older GCC's which did not provide it.  */

          for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
              int func_p = (ptr->nwords >= (long) sizeof (struct bb)
                            && ptr->nwords <= 1000
                            && ptr->functions);
              int line_p = (func_p && ptr->line_nums);
              int file_p = (func_p && ptr->filenames);
              int addr_p = (ptr->addresses != 0);
              long ncounts = ptr->ncounts;
              int blk_len = num_digits (ncounts, 10);

              fprintf (file, "File %s, %ld basic blocks \n\n",
                       ptr->filename, ncounts);
              /* Get max values for each field.  */
              for (i = 0; i < ncounts; i++)
                  if (cnt_max < ptr->counts[i])
                    cnt_max = ptr->counts[i];
                  if (addr_p && (unsigned long) addr_max < ptr->addresses[i])
                    addr_max = ptr->addresses[i];
                  if (line_p && line_max < ptr->line_nums[i])
                    line_max = ptr->line_nums[i];
                      p = (ptr->functions[i]) ? (ptr->functions[i]) : "<none>";
                      p = (ptr->filenames[i]) ? (ptr->filenames[i]) : "<none>";
              addr_len = num_digits (addr_max, 16);
              cnt_len = num_digits (cnt_max, 10);
              line_len = num_digits (line_max, 10);
              /* Now print out the basic block information.  */
              for (i = 0; i < ncounts; i++)
                           " Block #%*d: executed %*ld time(s)",
                           cnt_len, ptr->counts[i]);
                    fprintf (file, " address= 0x%.*lx", addr_len,
                             ptr->addresses[i]);
                    fprintf (file, " function= %-*s", func_len,
                             (ptr->functions[i]) ? ptr->functions[i] : "<none>");
                    fprintf (file, " line= %*ld", line_len, ptr->line_nums[i]);
                    fprintf (file, " file= %s",
                             (ptr->filenames[i]) ? ptr->filenames[i] : "<none>");
                  fprintf (file, "\n");
              fprintf (file, "\n");
          fprintf (file, "\n\n");
__bb_init_func (struct bb *blocks)
  /* User is supposed to check whether the first word is non-0,
     but just in case....  */
  if (blocks->zero_word)

  /* Initialize destructor.  */
    atexit (__bb_exit_func);

  /* Set up linked list.  */
  blocks->zero_word = 1;
  blocks->next = bb_head;

/* Called before fork or exec - write out profile information gathered so
   far and reset it to zero.  This avoids duplication or loss of the
   profile information gathered so far.  */
__bb_fork_func (void)
  for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
      for (i = ptr->ncounts - 1; i >= 0; i--)

#ifndef MACHINE_STATE_SAVE
#define MACHINE_STATE_SAVE(ID)
#ifndef MACHINE_STATE_RESTORE
#define MACHINE_STATE_RESTORE(ID)

/* Number of buckets in hashtable of basic block addresses.  */
#define BB_BUCKETS 311

/* Maximum length of string in file bb.in.  */
#define BBINBUFSIZE 500

  struct bb_edge *next;
  unsigned long src_addr;
  unsigned long dst_addr;
  unsigned long count;

  TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2

  struct bb_func *next;
  enum bb_func_mode mode;

/* This is the connection to the outside world.
   The BLOCK_PROFILER macro must set __bb.blocks
   and __bb.blockno.  */
  unsigned long blockno;

/* Vars to store addrs of source and destination basic blocks
   of a jump.  */
static unsigned long bb_src = 0;
static unsigned long bb_dst = 0;

static FILE *bb_tracefile = (FILE *) 0;
static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
static struct bb_func *bb_func_head = (struct bb_func *) 0;
static unsigned long bb_callcount = 0;
static int bb_mode = 0;

static unsigned long *bb_stack = (unsigned long *) 0;
static size_t bb_stacksize = 0;

static int reported = 0;
/* Trace modes:
   Always           : Print execution frequencies of basic blocks
                      to file bb.out.
   bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
   bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
   bb_mode & 4 != 0 : Cut call instructions from basic block flow.
   bb_mode & 8 != 0 : Insert return instructions in basic block flow.  */
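/* For illustration only, a hypothetical bb.in that turns on the basic block
   trace and the jump frequencies, and limits tracing to a single function
   (the keywords and the "file:function" form are the ones parsed by
   __bb_init_prg below):

	__bb_trace__
	__bb_jumps__
	myfile.c:main
*/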
/*#include <sys/types.h>*/
#include <sys/stat.h>
/*#include <malloc.h>*/

/* Commands executed by gopen.  */

#define GOPENDECOMPRESS "gzip -cd "
#define GOPENCOMPRESS "gzip -c >"

/* Like fopen but pipes through gzip.  mode may only be "r" or "w".
   If it does not compile, simply replace gopen by fopen and delete
   '.gz' from any first parameter to gopen.  */
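/* A hedged usage sketch (hypothetical caller): gopen ("bbtrace.gz", "w")
   pipes output through GOPENCOMPRESS ("gzip -c >"), gopen ("bbtrace", "w")
   degrades to a plain fopen, and whatever gopen returned must be closed
   with gclose below.  */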
gopen (char *fn, char *mode)
  if (mode[0] != 'r' && mode[0] != 'w')

  p = fn + strlen (fn)-1;
  use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
              || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));

          char *s = (char *) malloc (sizeof (char) * strlen (fn)
                                     + sizeof (GOPENDECOMPRESS));
          strcpy (s, GOPENDECOMPRESS);
          strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
          f = popen (s, mode);
          char *s = (char *) malloc (sizeof (char) * strlen (fn)
                                     + sizeof (GOPENCOMPRESS));
          strcpy (s, GOPENCOMPRESS);
          strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
          if (!(f = popen (s, mode)))
            f = fopen (s, mode);
    return fopen (fn, mode);

  if (!fstat (fileno (f), &buf) && S_ISFIFO (buf.st_mode))
#endif /* HAVE_POPEN */
/* Called once per program.  */
__bb_exit_trace_func (void)
  FILE *file = fopen ("bb.out", "a");
      gclose (bb_tracefile);
      fclose (bb_tracefile);
#endif /* HAVE_POPEN */

  /* Check functions in `bb.in'.  */
      const struct bb_func *p;
      int printed_something = 0;

      /* This is somewhat type incorrect.  */
      time ((void *) &time_value);

      for (p = bb_func_head; p != (struct bb_func *) 0; p = p->next)
          for (ptr = bb_head; ptr != (struct bb *) 0; ptr = ptr->next)
              if (!ptr->filename || (p->filename != (char *) 0 && strcmp (p->filename, ptr->filename)))
              for (blk = 0; blk < ptr->ncounts; blk++)
                  if (!strcmp (p->funcname, ptr->functions[blk]))
              if (!printed_something)
                  fprintf (file, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value));
                  printed_something = 1;
              fprintf (file, "\tFunction %s", p->funcname);
                fprintf (file, " of file %s", p->filename);
              fprintf (file, "\n" );
      if (printed_something)
        fprintf (file, "\n");

      if (!bb_hashbuckets)
              fprintf (stderr, "Profiler: out of memory\n");
      unsigned long addr_max = 0;
      unsigned long cnt_max = 0;

      /* This is somewhat type incorrect, but it avoids worrying about
         exactly where time.h is included from.  It should be ok unless
         a void * differs from other pointer formats, or if sizeof (long)
         is < sizeof (time_t).  It would be nice if we could assume the
         use of rational standards here.  */

      time ((void *) &time_value);
      fprintf (file, "Basic block jump tracing");

      switch (bb_mode & 12)
            fprintf (file, " (with call)");
            /* Print nothing.  */
            fprintf (file, " (with call & ret)");
            fprintf (file, " (with ret)");

      fprintf (file, " finished on %s\n", ctime ((void *) &time_value));

      for (i = 0; i < BB_BUCKETS; i++)
          struct bb_edge *bucket = bb_hashbuckets[i];
          for ( ; bucket; bucket = bucket->next)
              if (addr_max < bucket->src_addr)
                addr_max = bucket->src_addr;
              if (addr_max < bucket->dst_addr)
                addr_max = bucket->dst_addr;
              if (cnt_max < bucket->count)
                cnt_max = bucket->count;
      addr_len = num_digits (addr_max, 16);
      cnt_len = num_digits (cnt_max, 10);

      for ( i = 0; i < BB_BUCKETS; i++)
          struct bb_edge *bucket = bb_hashbuckets[i];
          for ( ; bucket; bucket = bucket->next)
              fprintf (file,
                       "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
                       addr_len, bucket->src_addr,
                       addr_len, bucket->dst_addr,
                       cnt_len, bucket->count);

      fprintf (file, "\n");
  /* Free allocated memory.  */
      struct bb_func *old = f;
      if (old->funcname) free (old->funcname);
      if (old->filename) free (old->filename);

      for (i = 0; i < BB_BUCKETS; i++)
          struct bb_edge *old, *bucket = bb_hashbuckets[i];
              bucket = bucket->next;
      free (bb_hashbuckets);

  for (b = bb_head; b; b = b->next)
    if (b->flags) free (b->flags);

/* Called once per program.  */
__bb_init_prg (void)
  char buf[BBINBUFSIZE];
  enum bb_func_mode m;

  /* Initialize destructor.  */
  atexit (__bb_exit_func);

  if (!(file = fopen ("bb.in", "r")))

  while(fgets (buf, BBINBUFSIZE, file) != 0)
      if (!strcmp (p, "__bb_trace__"))
      else if (!strcmp (p, "__bb_jumps__"))
      else if (!strcmp (p, "__bb_hidecall__"))
      else if (!strcmp (p, "__bb_showret__"))
          struct bb_func *f = (struct bb_func *) malloc (sizeof (struct bb_func));
              f->next = bb_func_head;
              if ((pos = strchr (p, ':')))
                  if (!(f->funcname = (char *) malloc (strlen (pos+1)+1)))
                  strcpy (f->funcname, pos+1);
                  if ((f->filename = (char *) malloc (l+1)))
                      strncpy (f->filename, p, l);
                      f->filename[l] = '\0';
                    f->filename = (char *) 0;
                  if (!(f->funcname = (char *) malloc (strlen (p)+1)))
                  strcpy (f->funcname, p);
                  f->filename = (char *) 0;

      bb_tracefile = gopen ("bbtrace.gz", "w");
      bb_tracefile = fopen ("bbtrace", "w");
#endif /* HAVE_POPEN */

      bb_hashbuckets = (struct bb_edge **)
                       malloc (BB_BUCKETS * sizeof (struct bb_edge *));
      /* Use a loop here rather than calling bzero to avoid having to
         conditionalize its existence.  */
      for (i = 0; i < BB_BUCKETS; i++)
        bb_hashbuckets[i] = 0;

      bb_stack = (unsigned long *) malloc (bb_stacksize * sizeof (*bb_stack));

  /* Initialize destructor.  */
  atexit (__bb_exit_trace_func);

/* Called upon entering a basic block.  */
__bb_trace_func (void)
  struct bb_edge *bucket;

  MACHINE_STATE_SAVE("1")
  if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))

  bb_dst = __bb.blocks->addresses[__bb.blockno];
  __bb.blocks->counts[__bb.blockno]++;

      fwrite (&bb_dst, sizeof (unsigned long), 1, bb_tracefile);

      struct bb_edge **startbucket, **oldnext;

      oldnext = startbucket
        = & bb_hashbuckets[ (((int) bb_src*8) ^ (int) bb_dst) % BB_BUCKETS ];
      bucket = *startbucket;

      for (bucket = *startbucket; bucket;
           oldnext = &(bucket->next), bucket = *oldnext)
          if (bucket->src_addr == bb_src
              && bucket->dst_addr == bb_dst)
              *oldnext = bucket->next;
              bucket->next = *startbucket;
              *startbucket = bucket;

          bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
              fprintf (stderr, "Profiler: out of memory\n");
              bucket->src_addr = bb_src;
              bucket->dst_addr = bb_dst;
              bucket->next = *startbucket;
              *startbucket = bucket;

  MACHINE_STATE_RESTORE("1")

/* Called when returning from a function and `__bb_showret__' is set.  */
__bb_trace_func_ret (void)
  struct bb_edge *bucket;

  if (!bb_callcount || (__bb.blocks->flags && (__bb.blocks->flags[__bb.blockno] & TRACE_OFF)))

      struct bb_edge **startbucket, **oldnext;

      oldnext = startbucket
        = & bb_hashbuckets[ (((int) bb_dst * 8) ^ (int) bb_src) % BB_BUCKETS ];
      bucket = *startbucket;

      for (bucket = *startbucket; bucket;
           oldnext = &(bucket->next), bucket = *oldnext)
          if (bucket->src_addr == bb_dst
              && bucket->dst_addr == bb_src)
              *oldnext = bucket->next;
              bucket->next = *startbucket;
              *startbucket = bucket;

          bucket = (struct bb_edge *) malloc (sizeof (struct bb_edge));
              fprintf (stderr, "Profiler: out of memory\n");
              bucket->src_addr = bb_dst;
              bucket->dst_addr = bb_src;
              bucket->next = *startbucket;
              *startbucket = bucket;
/* Called upon entering the first function of a file.  */
__bb_init_file (struct bb *blocks)
  const struct bb_func *p;
  long blk, ncounts = blocks->ncounts;
  const char **functions = blocks->functions;

  /* Set up linked list.  */
  blocks->zero_word = 1;
  blocks->next = bb_head;

      || !(blocks->flags = (char *) malloc (sizeof (char) * blocks->ncounts)))
  for (blk = 0; blk < ncounts; blk++)
    blocks->flags[blk] = 0;

  for (blk = 0; blk < ncounts; blk++)
      for (p = bb_func_head; p; p = p->next)
        if (!strcmp (p->funcname, functions[blk])
            && (!p->filename || !strcmp (p->filename, blocks->filename)))
          blocks->flags[blk] |= p->mode;

/* Called when exiting from a function.  */
__bb_trace_ret (void)
  MACHINE_STATE_SAVE("2")

      if ((bb_mode & 12) && bb_stacksize > bb_callcount)
        bb_src = bb_stack[bb_callcount];
        __bb_trace_func_ret ();

  MACHINE_STATE_RESTORE("2")

/* Called when entering a function.  */
__bb_init_trace_func (struct bb *blocks, unsigned long blockno)
  static int trace_init = 0;

  MACHINE_STATE_SAVE("3")

  if (!blocks->zero_word)
        __bb_init_file (blocks);

      if (bb_callcount >= bb_stacksize)
          size_t newsize = bb_callcount + 100;

          bb_stack = (unsigned long *) realloc (bb_stack, newsize);
              fprintf (stderr, "Profiler: out of memory\n");
              goto stack_overflow;
          bb_stacksize = newsize;
      bb_stack[bb_callcount] = bb_src;

  else if (blocks->flags && (blocks->flags[blockno] & TRACE_ON))
      bb_stack[bb_callcount] = bb_src;

  MACHINE_STATE_RESTORE("3")
#endif /* not inhibit_libc */
#endif /* not BLOCK_PROFILER_CODE */

#ifdef L_clear_cache
/* Clear part of an instruction cache.  */

#define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)

__clear_cache (char *beg __attribute__((__unused__)),
               char *end __attribute__((__unused__)))
#ifdef CLEAR_INSN_CACHE
  CLEAR_INSN_CACHE (beg, end);
#ifdef INSN_CACHE_SIZE
  static char array[INSN_CACHE_SIZE + INSN_CACHE_PLANE_SIZE + INSN_CACHE_LINE_WIDTH];
  static int initialized;

  typedef (*function_ptr) (void);

#if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
  /* It's cheaper to clear the whole cache.
     Put in a series of jump instructions so that calling the beginning
     of the cache will clear the whole thing.  */
      int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
                 & -INSN_CACHE_LINE_WIDTH);
      int end_ptr = ptr + INSN_CACHE_SIZE;

      while (ptr < end_ptr)
          *(INSTRUCTION_TYPE *)ptr
            = JUMP_AHEAD_INSTRUCTION + INSN_CACHE_LINE_WIDTH;
          ptr += INSN_CACHE_LINE_WIDTH;
      *(INSTRUCTION_TYPE *) (ptr - INSN_CACHE_LINE_WIDTH) = RETURN_INSTRUCTION;
  /* Call the beginning of the sequence.  */
  (((function_ptr) (((int) array + INSN_CACHE_LINE_WIDTH - 1)
                    & -INSN_CACHE_LINE_WIDTH))
   ());
#else /* Cache is large.  */
      int ptr = (((int) array + INSN_CACHE_LINE_WIDTH - 1)
                 & -INSN_CACHE_LINE_WIDTH);

      while (ptr < (int) array + sizeof array)
          *(INSTRUCTION_TYPE *)ptr = RETURN_INSTRUCTION;
          ptr += INSN_CACHE_LINE_WIDTH;

  /* Find the location in array that occupies the same cache line as BEG.  */

  offset = ((int) beg & -INSN_CACHE_LINE_WIDTH) & (INSN_CACHE_PLANE_SIZE - 1);
  start_addr = (((int) (array + INSN_CACHE_PLANE_SIZE - 1)
                 & -INSN_CACHE_PLANE_SIZE)
                + offset);
  /* Compute the cache alignment of the place to stop clearing.  */
#if 0 /* This is not needed for gcc's purposes.  */
  /* If the block to clear is bigger than a cache plane,
     we clear the entire cache, and OFFSET is already correct.  */
  if (end < beg + INSN_CACHE_PLANE_SIZE)
    offset = (((int) (end + INSN_CACHE_LINE_WIDTH - 1)
               & -INSN_CACHE_LINE_WIDTH)
              & (INSN_CACHE_PLANE_SIZE - 1));

#if INSN_CACHE_DEPTH > 1
  end_addr = (start_addr & -INSN_CACHE_PLANE_SIZE) + offset;
  if (end_addr <= start_addr)
    end_addr += INSN_CACHE_PLANE_SIZE;

  for (plane = 0; plane < INSN_CACHE_DEPTH; plane++)
      int addr = start_addr + plane * INSN_CACHE_PLANE_SIZE;
      int stop = end_addr + plane * INSN_CACHE_PLANE_SIZE;

      while (addr != stop)
          /* Call the return instruction at ADDR.  */
          ((function_ptr) addr) ();

          addr += INSN_CACHE_LINE_WIDTH;
#else /* just one plane */
      /* Call the return instruction at START_ADDR.  */
      ((function_ptr) start_addr) ();

      start_addr += INSN_CACHE_LINE_WIDTH;
  while ((start_addr % INSN_CACHE_SIZE) != offset);
#endif /* just one plane */
#endif /* Cache is large */
#endif /* Cache exists */
#endif /* CLEAR_INSN_CACHE */

#endif /* L_clear_cache */
/* Jump to a trampoline, loading the static chain address.  */

#if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)

extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));

mprotect (char *addr, int len, int prot)
  if (VirtualProtect (addr, len, np, &op))

#endif /* WINNT && ! __CYGWIN__ && ! _UWIN */

#ifdef TRANSFER_FROM_TRAMPOLINE
TRANSFER_FROM_TRAMPOLINE

#if defined (NeXT) && defined (__MACH__)

/* Make stack executable so we can call trampolines on stack.
   This is called from INITIALIZE_TRAMPOLINE in next.h.  */

#include <mach/mach.h>

__enable_execute_stack (char *addr)
  char *eaddr = addr + TRAMPOLINE_SIZE;
  vm_address_t a = (vm_address_t) addr;

  /* turn on execute access on stack */
  r = vm_protect (task_self (), a, TRAMPOLINE_SIZE, FALSE, VM_PROT_ALL);
  if (r != KERN_SUCCESS)
      mach_error("vm_protect VM_PROT_ALL", r);

  /* We inline the i-cache invalidation for speed */

#ifdef CLEAR_INSN_CACHE
  CLEAR_INSN_CACHE (addr, eaddr);
  __clear_cache ((int) addr, (int) eaddr);

#endif /* defined (NeXT) && defined (__MACH__) */

/* Make stack executable so we can call trampolines on stack.
   This is called from INITIALIZE_TRAMPOLINE in convex.h.  */

#include <sys/mman.h>
#include <sys/vmparam.h>
#include <machine/machparam.h>

__enable_execute_stack (void)
  static unsigned lowest = USRSTACK;
  unsigned current = (unsigned) &fp & -NBPG;

  if (lowest > current)
      unsigned len = lowest - current;
      mremap (current, &len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE);

  /* Clear instruction cache in case an old trampoline is in it.  */

#endif /* __convex__ */
/* Modified from the convex code above.  */

#include <sys/param.h>
#include <sys/m88kbcs.h>

__enable_execute_stack (void)
  static unsigned long lowest = USRSTACK;
  unsigned long current = (unsigned long) &save_errno & -NBPC;

  /* Ignore errno being set.  memctl sets errno to EINVAL whenever the
     address is seen as 'negative'.  That is the case with the stack.  */

  if (lowest > current)
      unsigned len = lowest - current;
      memctl (current, len, MCT_TEXT);
  memctl (current, NBPC, MCT_TEXT);

#endif /* __sysV88__ */
#include <sys/signal.h>

/* Motorola forgot to put memctl.o in the libp version of libc881.a,
   so define it here, because we need it in __clear_insn_cache below */
/* On older versions of this OS, no memctl or MCT_TEXT are defined;
   hence we enable this stuff only if MCT_TEXT is #define'd.  */

/* Clear instruction cache so we can call trampolines on stack.
   This is called from FINALIZE_TRAMPOLINE in mot3300.h.  */
__clear_insn_cache (void)
  /* Preserve errno, because users would be surprised to have
     errno changing without explicitly calling any system-call.  */
  /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
     No need to use an address derived from _start or %sp, as 0 works also.  */
  memctl (0, 4096, MCT_TEXT);

#endif /* __sysV68__ */
#undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch.  */

#include <sys/mman.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/vmmac.h>

/* Modified from the convex code above.
   mremap promises to clear the i-cache.  */

__enable_execute_stack (void)
  if (mprotect (((unsigned int)&fp/PAGSIZ)*PAGSIZ, PAGSIZ,
                PROT_READ|PROT_WRITE|PROT_EXEC))
      perror ("mprotect in __enable_execute_stack");

#endif /* __pyr__ */
#if defined (sony_news) && defined (SYSTYPE_BSD)

#include <sys/types.h>
#include <sys/param.h>
#include <syscall.h>
#include <machine/sysnews.h>

/* cacheflush function for NEWS-OS 4.2.
   This function is called from trampoline-initialize code
   defined in config/mips/mips.h.  */
cacheflush (char *beg, int size, int flag)
  if (syscall (SYS_sysnews, NEWS_CACHEFLUSH, beg, size, FLUSH_BCACHE))
      perror ("cache_flush");

#endif /* sony_news */
#endif /* L_trampoline */

#include "gbl-ctors.h"
/* Some systems use __main in a way incompatible with its use in gcc, in these
   cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
   give the same symbol without quotes for an alternative entry point.  You
   must define both, or neither.  */
#define NAME__MAIN "__main"
#define SYMBOL__MAIN __main

#ifdef INIT_SECTION_ASM_OP
#undef HAS_INIT_SECTION
#define HAS_INIT_SECTION

#if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)

/* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
   code to run constructors.  In that case, we need to handle EH here, too.  */

#ifdef EH_FRAME_SECTION
extern unsigned char __EH_FRAME_BEGIN__[];
/* Run all the global destructors on exit from the program.  */
__do_global_dtors (void)
#ifdef DO_GLOBAL_DTORS_BODY
  DO_GLOBAL_DTORS_BODY;
  static func_ptr *p = __DTOR_LIST__ + 1;
#if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
    static int completed = 0;
      __deregister_frame_info (__EH_FRAME_BEGIN__);

#ifndef HAS_INIT_SECTION
/* Run all the global constructors on entry to the program.  */
__do_global_ctors (void)
#ifdef EH_FRAME_SECTION
    static struct object object;
    __register_frame_info (__EH_FRAME_BEGIN__, &object);
  DO_GLOBAL_CTORS_BODY;
  atexit (__do_global_dtors);
#endif /* no HAS_INIT_SECTION */

#if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
/* Subroutine called automatically by `main'.
   Compiling a global function named `main'
   produces an automatic call to this function at the beginning.

   For many systems, this routine calls __do_global_ctors.
   For systems which support a .init section we use the .init section
   to run __do_global_ctors, so we need not do anything here.  */

  /* Support recursive calls to `main': run initializers just once.  */
  static int initialized;
      __do_global_ctors ();
#endif /* no HAS_INIT_SECTION or INVOKE__main */

#endif /* L__main */
#endif /* __CYGWIN__ */
#include "gbl-ctors.h"

/* Provide default definitions for the lists of constructors and
   destructors, so that we don't get linker errors.  These symbols are
   intentionally bss symbols, so that gld and/or collect will provide
   the right values.  */

/* We declare the lists here with two elements each,
   so that they are valid empty lists if no other definition is loaded.

   If we are using the old "set" extensions to have the gnu linker
   collect ctors and dtors, then __CTOR_LIST__ and __DTOR_LIST__
   must be in the bss/common section.

   Long term no port should use those extensions.  But many still do.  */
#if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
#if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
func_ptr __CTOR_LIST__[2] = {0, 0};
func_ptr __DTOR_LIST__[2] = {0, 0};
func_ptr __CTOR_LIST__[2];
func_ptr __DTOR_LIST__[2];
#endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
#endif /* L_ctors */

#include "gbl-ctors.h"

static func_ptr *atexit_chain = 0;
static long atexit_chain_length = 0;
static volatile long last_atexit_chain_slot = -1;

atexit (func_ptr func)
  if (++last_atexit_chain_slot == atexit_chain_length)
      atexit_chain_length += 32;
        atexit_chain = (func_ptr *) realloc (atexit_chain, atexit_chain_length
                                             * sizeof (func_ptr));
        atexit_chain = (func_ptr *) malloc (atexit_chain_length
                                            * sizeof (func_ptr));
          atexit_chain_length = 0;
          last_atexit_chain_slot = -1;
  atexit_chain[last_atexit_chain_slot] = func;

extern void _cleanup (void);
extern void _exit (int) __attribute__ ((__noreturn__));

  for ( ; last_atexit_chain_slot-- >= 0; )
      (*atexit_chain[last_atexit_chain_slot + 1]) ();
      atexit_chain[last_atexit_chain_slot + 1] = 0;
  free (atexit_chain);

/* Simple; we just need a wrapper for ON_EXIT.  */
atexit (func_ptr func)
  return ON_EXIT (func);
#endif /* ON_EXIT */
#endif /* NEED_ATEXIT */
/* Shared exception handling support routines.  */
__default_terminate (void)

static __terminate_func_ptr __terminate_func =
  __default_terminate;

void __attribute__((__noreturn__))
  (*__terminate_func)();

__terminate_func_ptr
__terminate_set_func (__terminate_func_ptr newfunc)
  __terminate_func_ptr oldfunc = __terminate_func;
  __terminate_func = newfunc;

__throw_type_match (void *catch_type, void *throw_type, void *obj)
  printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
          catch_type, throw_type);
  if (strcmp ((const char *)catch_type, (const char *)throw_type) == 0)
/* Include definitions of EH context and table layout */

#include "eh-common.h"
#ifndef inhibit_libc

/* Allocate and return a new EH context structure.  */
new_eh_context (void)
  struct eh_full_context {
    struct eh_context c;
  } *ehfc = (struct eh_full_context *) malloc (sizeof *ehfc);

  memset (ehfc, 0, sizeof *ehfc);

  ehfc->c.dynamic_handler_chain = (void **) ehfc->top_elt;

  /* This should optimize out entirely.  This should always be true,
     but just in case it ever isn't, don't allow bogus code to be
     generated.  */
  if ((void*)(&ehfc->c) != (void*)ehfc)

static __gthread_key_t eh_context_key;

/* Destructor for struct eh_context.  */
eh_context_free (void *ptr)
  __gthread_key_dtor (eh_context_key, ptr);

/* Pointer to function to return EH context.  */

static struct eh_context *eh_context_initialize (void);
static struct eh_context *eh_context_static (void);
static struct eh_context *eh_context_specific (void);

static struct eh_context *(*get_eh_context) (void) = &eh_context_initialize;

/* Routine to get EH context.
   This one will simply call the function pointer.  */
__get_eh_context (void)
  return (void *) (*get_eh_context) ();

/* Get and set the language specific info pointer.  */
__get_eh_info (void)
  struct eh_context *eh = (*get_eh_context) ();

#ifdef DWARF2_UNWIND_INFO
static int dwarf_reg_size_table_initialized = 0;
static char dwarf_reg_size_table[DWARF_FRAME_REGISTERS];

init_reg_size_table (void)
  __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
  dwarf_reg_size_table_initialized = 1;

eh_threads_initialize (void)
  /* Try to create the key.  If it fails, revert to static method,
     otherwise start using thread specific EH contexts.  */
  if (__gthread_key_create (&eh_context_key, &eh_context_free) == 0)
    get_eh_context = &eh_context_specific;
    get_eh_context = &eh_context_static;
#endif /* no __GTHREADS */

/* Initialize EH context.
   This will be called only once, since we change GET_EH_CONTEXT
   pointer to another routine.  */

static struct eh_context *
eh_context_initialize (void)
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  /* Make sure that get_eh_context does not point to us anymore.
     Some systems have dummy thread routines in their libc that
     return a success (Solaris 2.6 for example).  */
  if (__gthread_once (&once, eh_threads_initialize) != 0
      || get_eh_context == &eh_context_initialize)
      /* Use static version of EH context.  */
      get_eh_context = &eh_context_static;

#ifdef DWARF2_UNWIND_INFO
    static __gthread_once_t once_regsizes = __GTHREAD_ONCE_INIT;
    if (__gthread_once (&once_regsizes, init_reg_size_table) != 0
        || ! dwarf_reg_size_table_initialized)
      init_reg_size_table ();

#else /* no __GTHREADS */

  /* Use static version of EH context.  */
  get_eh_context = &eh_context_static;

#ifdef DWARF2_UNWIND_INFO
  init_reg_size_table ();
#endif /* no __GTHREADS */

  return (*get_eh_context) ();

/* Return a static EH context.  */

static struct eh_context *
eh_context_static (void)
  static struct eh_context eh;
  static int initialized;
  static void *top_elt[2];

      memset (&eh, 0, sizeof eh);
      eh.dynamic_handler_chain = top_elt;

/* Return a thread specific EH context.  */

static struct eh_context *
eh_context_specific (void)
  struct eh_context *eh;
  eh = (struct eh_context *) __gthread_getspecific (eh_context_key);
      eh = new_eh_context ();
      if (__gthread_setspecific (eh_context_key, (void *) eh) != 0)

#endif /* __GTHREADS */
/* Support routines for alloc/free during exception handling */

/* __eh_alloc and __eh_free attempt allocation using malloc, but fall back to
   the small arena in the eh_context.  This is needed because throwing an
   out-of-memory exception would fail otherwise.  The emergency space is
   allocated in blocks of size EH_ALLOC_ALIGN, the
   minimum allocation being two blocks.  A bitmask indicates which blocks
   have been allocated.  To indicate the size of an allocation, the bit for
   the final block is not set.  Hence each allocation is a run of 1s followed
   by a zero.  */
3372 __eh_alloc (size_t size
)
3381 struct eh_context
*eh
= __get_eh_context ();
3382 unsigned blocks
= (size
+ EH_ALLOC_ALIGN
- 1) / EH_ALLOC_ALIGN
;
3383 unsigned real_mask
= eh
->alloc_mask
| (eh
->alloc_mask
<< 1);
3387 if (blocks
> EH_ALLOC_SIZE
/ EH_ALLOC_ALIGN
)
3389 blocks
+= blocks
== 1;
3390 our_mask
= (1 << blocks
) - 1;
3392 for (ix
= EH_ALLOC_SIZE
/ EH_ALLOC_ALIGN
- blocks
; ix
; ix
--)
3393 if (! ((real_mask
>> ix
) & our_mask
))
3395 /* found some space */
3396 p
= &eh
->alloc_buffer
[ix
* EH_ALLOC_ALIGN
];
3397 eh
->alloc_mask
|= (our_mask
>> 1) << ix
;
/* Free the memory for a cp_eh_info and associated exception, given
   a pointer to the cp_eh_info.  */

void
__eh_free (void *p)
{
  struct eh_context *eh = __get_eh_context ();

  ptrdiff_t diff = (char *)p - &eh->alloc_buffer[0];
  if (diff >= 0 && diff < EH_ALLOC_SIZE)
    {
      unsigned mask = eh->alloc_mask;
      unsigned bit = 1 << (diff / EH_ALLOC_ALIGN);

      do
        {
          mask &= ~bit;
          bit <<= 1;
        }
      while (mask & bit);
      eh->alloc_mask = mask;
    }
  else
    free (p);
}
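/* Illustrative sketch, not part of libgcc: a standalone model of the
   emergency-arena bitmask bookkeeping used by __eh_alloc/__eh_free above.
   ARENA_SIZE/ARENA_ALIGN and all names are made up; the encoding is the
   same -- an allocation of N blocks sets N-1 bits, leaving the final
   block's bit clear, so every live allocation is a run of 1s ended by 0.  */
#if 0
#include <stdio.h>

#define ARENA_SIZE   256
#define ARENA_ALIGN  16
#define ARENA_BLOCKS (ARENA_SIZE / ARENA_ALIGN)

static unsigned alloc_mask;

static int
arena_alloc (size_t size)             /* returns a block index, or -1 */
{
  unsigned blocks = (size + ARENA_ALIGN - 1) / ARENA_ALIGN;
  unsigned real_mask = alloc_mask | (alloc_mask << 1);
  unsigned our_mask;
  int ix;

  if (blocks > ARENA_BLOCKS)
    return -1;
  blocks += blocks == 1;              /* minimum allocation: two blocks */
  our_mask = (1u << blocks) - 1;

  for (ix = ARENA_BLOCKS - blocks; ix >= 0; ix--)
    if (! ((real_mask >> ix) & our_mask))
      {
        alloc_mask |= (our_mask >> 1) << ix;  /* final block bit stays 0 */
        return ix;
      }
  return -1;
}

static void
arena_free (int ix)
{
  unsigned mask = alloc_mask;
  unsigned bit = 1u << ix;

  /* Clear the run of 1s; the terminating 0 bit was never set.  */
  while (mask & bit)
    {
      mask &= ~bit;
      bit <<= 1;
    }
  alloc_mask = mask;
}

int
main (void)
{
  int a = arena_alloc (40);   /* needs 3 blocks of 16 */
  int b = arena_alloc (10);   /* rounds up to the 2-block minimum */
  printf ("a=%d b=%d mask=%#x\n", a, b, alloc_mask);
  arena_free (a);
  printf ("after free: mask=%#x\n", alloc_mask);
  return 0;
}
#endif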
/* Support routines for setjmp/longjmp exception handling.  */

/* Calls to __sjthrow are generated by the compiler when an exception
   is raised when using the setjmp/longjmp exception handling codegen
   method.  */

#ifdef DONT_USE_BUILTIN_SETJMP
extern void longjmp (void *, int);
#endif

/* Routine to get the head of the current thread's dynamic handler chain
   used for exception handling.  */

void ***
__get_dynamic_handler_chain (void)
{
  struct eh_context *eh = (*get_eh_context) ();
  return &eh->dynamic_handler_chain;
}
/* This is used to throw an exception when the setjmp/longjmp codegen
   method is used for exception handling.

   We call __terminate if there are no handlers left.  Otherwise we run the
   cleanup actions off the dynamic cleanup stack, and pop the top of the
   dynamic handler chain, and use longjmp to transfer back to the associated
   handler.  */

void
__sjthrow (void)
{
  struct eh_context *eh = (*get_eh_context) ();
  void ***dhc = &eh->dynamic_handler_chain;
  void *jmpbuf;
  void (*func)(void *, int);
  void *arg;
  /* The cleanup chain is one word into the buffer.  Get the cleanup chain.  */
  void ***cleanup = (void***)&(*dhc)[1];

  /* If there are any cleanups in the chain, run them now.  */
  if (cleanup[0])
    {
      double store[200];
      void **buf = (void**)store;
      buf[0] = 0;
      buf[1] = 0;

#ifdef DONT_USE_BUILTIN_SETJMP
      if (! setjmp (&buf[2]))
#else
      if (! __builtin_setjmp (&buf[2]))
#endif
        {
          *dhc = buf;
          while (cleanup[0])
            {
              func = (void(*)(void*, int))cleanup[0][1];
              arg = (void*)cleanup[0][2];

              /* Update this before running the cleanup.  */
              cleanup[0] = (void **)cleanup[0][0];

              (*func)(arg, 2);
            }
          *dhc = buf[0];
        }
      else
        __terminate ();
    }

  /* We must call terminate if we try and rethrow an exception, when
     there is no exception currently active and when there are no
     handlers left.  */
  if (! eh->info || (*dhc)[0] == 0)
    __terminate ();

  /* Find the jmpbuf associated with the top element of the dynamic
     handler chain.  The jumpbuf starts two words into the buffer.  */
  jmpbuf = &(*dhc)[2];

  /* Then we pop the top element off the dynamic handler chain.  */
  *dhc = (void**)(*dhc)[0];

  /* And then we jump to the handler.  */

#ifdef DONT_USE_BUILTIN_SETJMP
  longjmp (jmpbuf, 1);
#else
  __builtin_longjmp (jmpbuf, 1);
#endif
}
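/* Illustrative declarations, not used by the code above: the dynamic
   handler buffer that __sjthrow indexes as a void* array can be pictured
   as the struct below.  The names are hypothetical, and the real buffer is
   laid out by the compiler (with a __builtin_setjmp-format jump buffer
   rather than a literal jmp_buf unless DONT_USE_BUILTIN_SETJMP is set).  */
#if 0
#include <setjmp.h>

struct sjlj_cleanup             /* one entry on the per-handler cleanup chain */
{
  struct sjlj_cleanup *next;    /* cleanup[0][0] */
  void (*func) (void *, int);   /* cleanup[0][1] */
  void *arg;                    /* cleanup[0][2] */
};

struct sjlj_handler             /* one element of the dynamic handler chain */
{
  struct sjlj_handler *prev;    /* (*dhc)[0]: link to the enclosing handler */
  struct sjlj_cleanup *cleanups;/* (*dhc)[1]: cleanup chain for this region */
  jmp_buf jbuf;                 /* &(*dhc)[2]: where __sjthrow jumps to */
};
#endif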
/* Run cleanups on the dynamic cleanup stack for the current dynamic
   handler, then pop the handler off the dynamic handler stack, and
   then throw.  This is used to skip the first handler, and transfer
   control to the next handler in the dynamic handler stack.  */

void
__sjpopnthrow (void)
{
  struct eh_context *eh = (*get_eh_context) ();
  void ***dhc = &eh->dynamic_handler_chain;
  void (*func)(void *, int);
  void *arg;
  /* The cleanup chain is one word into the buffer.  Get the cleanup chain.  */
  void ***cleanup = (void***)&(*dhc)[1];

  /* If there are any cleanups in the chain, run them now.  */
  if (cleanup[0])
    {
      double store[200];
      void **buf = (void**)store;
      buf[0] = 0;
      buf[1] = 0;

#ifdef DONT_USE_BUILTIN_SETJMP
      if (! setjmp (&buf[2]))
#else
      if (! __builtin_setjmp (&buf[2]))
#endif
        {
          *dhc = buf;
          while (cleanup[0])
            {
              func = (void(*)(void*, int))cleanup[0][1];
              arg = (void*)cleanup[0][2];

              /* Update this before running the cleanup.  */
              cleanup[0] = (void **)cleanup[0][0];

              (*func)(arg, 2);
            }
          *dhc = buf[0];
        }
      else
        __terminate ();
    }

  /* Then we pop the top element off the dynamic handler chain.  */
  *dhc = (void**)(*dhc)[0];

  __sjthrow ();
}
/* Support code for all exception region-based exception handling.  */

int
__eh_rtime_match (void *rtime)
{
  void *info;
  __eh_matcher matcher;
  void *ret;

  info = *(__get_eh_info ());
  matcher = ((__eh_info *)info)->match_function;
  if (! matcher)
    {
#ifndef inhibit_libc
      fprintf (stderr, "Internal Compiler Bug: No runtime type matcher.");
#endif
      return 0;
    }
  ret = (*matcher) (info, rtime, (void *)0);
  return (ret != NULL);
}
/* This value identifies the place from which an exception is being
   thrown.  */

#ifdef EH_TABLE_LOOKUP

EH_TABLE_LOOKUP

#else

#ifdef DWARF2_UNWIND_INFO

/* Return the table version of an exception descriptor */
short
__get_eh_table_version (exception_descriptor *table)
{
  return table->lang.version;
}

/* Return the originating table language of an exception descriptor */
short
__get_eh_table_language (exception_descriptor *table)
{
  return table->lang.language;
}
/* This routine takes a PC and a pointer to the exception region TABLE for
   its translation unit, and returns the address of the exception handler
   associated with the closest exception table handler entry associated
   with that PC, or 0 if there are no table entries the PC fits in.

   In the event of a tie, we have to give the last entry, as it represents
   an inner block.  */

static void *
old_find_exception_handler (void *pc, old_exception_table *table)
{
  if (table)
    {
      int pos;
      int best = -1;

      /* We can't do a binary search because the table isn't guaranteed
         to be sorted from function to function.  */
      for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
        {
          if (table[pos].start_region <= pc && table[pos].end_region > pc)
            {
              /* This can apply.  Make sure it is at least as small as
                 the previous best.  */
              if (best == -1 || (table[pos].end_region <= table[best].end_region
                        && table[pos].start_region >= table[best].start_region))
                best = pos;
            }
          /* But it is sorted by starting PC within a function.  */
          else if (best >= 0 && table[pos].start_region > pc)
            break;
        }
      if (best != -1)
        return table[best].exception_handler;
    }

  return (void *) 0;
}
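/* Illustrative sketch, not part of libgcc: a toy version of the
   "closest enclosing region" rule above, with made-up addresses.  An outer
   region [0x100,0x200) nests an inner region [0x140,0x180); the smaller
   region wins for a PC inside both.  */
#if 0
#include <stdio.h>

struct region { unsigned start, end; const char *handler; };

static const struct region table[] = {
  { 0x100, 0x200, "outer" },
  { 0x140, 0x180, "inner" },
  { 0,     0,     0       }            /* sentinel */
};

static const char *
find_handler (unsigned pc)
{
  int pos, best = -1;

  for (pos = 0; table[pos].end != 0; ++pos)
    if (table[pos].start <= pc && table[pos].end > pc)
      {
        /* Keep the smallest region seen so far; on an exact tie the later
           entry wins, as in the loop above.  */
        if (best == -1
            || (table[pos].end <= table[best].end
                && table[pos].start >= table[best].start))
          best = pos;
      }
  return best >= 0 ? table[best].handler : "none";
}

int
main (void)
{
  printf ("pc=0x150 -> %s\n", find_handler (0x150));   /* inner */
  printf ("pc=0x110 -> %s\n", find_handler (0x110));   /* outer */
  printf ("pc=0x300 -> %s\n", find_handler (0x300));   /* none  */
  return 0;
}
#endif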
/* find_exception_handler finds the correct handler, if there is one, to
   handle an exception.  It returns a pointer to the handler which control
   should be transferred to, or NULL if there is nothing left.

   PC - pc where the exception originates.  If this is a rethrow,
        then this starts out as a pointer to the exception table
        entry we wish to rethrow out of.
   TABLE - exception table for the current module.
   EH_INFO - eh info pointer for this exception.
   RETHROW - 1 if this is a rethrow. (see incoming value of PC).
   CLEANUP - returned flag indicating whether this is a cleanup handler.  */

static void *
find_exception_handler (void *pc, exception_descriptor *table,
                        __eh_info *eh_info, int rethrow, int *cleanup)
{
  void *retval = NULL;
  *cleanup = 1;
  if (table)
    {
      int pos = 0;
      /* The new model assumed the table is sorted inner-most out so the
         first region we find which matches is the correct one */

      exception_table *tab = &(table->table[0]);

      /* Subtract 1 from the PC to avoid hitting the next region */
      if (rethrow)
        {
          /* pc is actually the region table entry to rethrow out of */
          pos = ((exception_table *) pc) - tab;
          pc = ((exception_table *) pc)->end_region - 1;

          /* The label is always on the LAST handler entry for a region,
             so we know the next entry is a different region, even if the
             addresses are the same.  Make sure its not end of table tho.  */
          if (tab[pos].start_region != (void *) -1)
            pos++;
        }
      else
        pc--;

      /* We can't do a binary search because the table is in inner-most
         to outermost address ranges within functions */
      for ( ; tab[pos].start_region != (void *) -1; pos++)
        {
          if (tab[pos].start_region <= pc && tab[pos].end_region > pc)
            {
              if (tab[pos].match_info)
                {
                  __eh_matcher matcher = eh_info->match_function;
                  /* match info but no matcher is NOT a match */
                  if (matcher)
                    {
                      void *ret = (*matcher)((void *) eh_info,
                                             tab[pos].match_info, table);
                      if (ret)
                        {
                          if (retval == NULL)
                            retval = tab[pos].exception_handler;
                          *cleanup = 0;
                          break;
                        }
                    }
                }
              else
                {
                  if (retval == NULL)
                    retval = tab[pos].exception_handler;
                }
            }
        }
    }
  return retval;
}
#endif /* DWARF2_UNWIND_INFO */
#endif /* EH_TABLE_LOOKUP */
#ifdef DWARF2_UNWIND_INFO
/* Support code for exception handling using static unwind information.  */

/* This type is used in get_reg and put_reg to deal with ABIs where a void*
   is smaller than a word, such as the Irix 6 n32 ABI.  We cast twice to
   avoid a warning about casting between int and pointer of different
   sizes.  */

typedef int ptr_type __attribute__ ((mode (pointer)));

typedef struct
{
  word_type *reg[DWARF_FRAME_REGISTERS];
} saved_regs_t;

#ifdef INCOMING_REGNO
/* Is the saved value for register REG in frame UDATA stored in a register
   window in the previous frame?  */

/* ??? The Sparc INCOMING_REGNO references TARGET_FLAT.  This allows us
   to use the macro here.  One wonders, though, that perhaps TARGET_FLAT
   compiled functions won't work with the frame-unwind stuff here.
   Perhaps the entirety of in_reg_window should be conditional on having
   seen a DW_CFA_GNU_window_save?  */
#define target_flags 0
static int
in_reg_window (int reg, frame_state *udata)
{
  if (udata->saved[reg] == REG_SAVED_REG)
    return INCOMING_REGNO (reg) == reg;
  if (udata->saved[reg] != REG_SAVED_OFFSET)
    return 0;

#ifdef STACK_GROWS_DOWNWARD
  return udata->reg_or_offset[reg] > 0;
#else
  return udata->reg_or_offset[reg] < 0;
#endif
}
#else
static int
in_reg_window (int reg __attribute__ ((__unused__)),
               frame_state *udata __attribute__ ((__unused__)))
{
  return 0;
}
#endif /* INCOMING_REGNO */
/* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
   frame called by UDATA or 0.  */

static word_type *
get_reg_addr (unsigned reg, frame_state *udata, frame_state *sub_udata)
{
  while (udata->saved[reg] == REG_SAVED_REG)
    {
      reg = udata->reg_or_offset[reg];
      if (in_reg_window (reg, udata))
        {
          udata = sub_udata;
          sub_udata = NULL;
        }
    }
  if (udata->saved[reg] == REG_SAVED_OFFSET)
    return (word_type *)(udata->cfa + udata->reg_or_offset[reg]);

  /* We don't have a saved copy of this register.  */
  abort ();
}

/* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
   frame called by UDATA or 0.  */

static inline void *
get_reg (unsigned reg, frame_state *udata, frame_state *sub_udata)
{
  return (void *)(ptr_type) *get_reg_addr (reg, udata, sub_udata);
}
/* Overwrite the saved value for register REG in frame UDATA with VAL.  */

static inline void
put_reg (unsigned reg, void *val, frame_state *udata)
{
  *get_reg_addr (reg, udata, NULL) = (word_type)(ptr_type) val;
}

/* Copy the saved value for register REG from PTREG to frame
   TARGET_UDATA.  Unlike the previous two functions, this can handle
   registers that are not one word large.  */

static void
copy_reg (unsigned reg, word_type *preg, frame_state *target_udata)
{
  word_type *ptreg = get_reg_addr (reg, target_udata, NULL);
  memcpy (ptreg, preg, dwarf_reg_size_table[reg]);
}
/* Retrieve the return address for frame UDATA.  */

static inline void *
get_return_addr (frame_state *udata, frame_state *sub_udata)
{
  return __builtin_extract_return_addr
    (get_reg (udata->retaddr_column, udata, sub_udata));
}

/* Overwrite the return address for frame UDATA with VAL.  */

static inline void
put_return_addr (void *val, frame_state *udata)
{
  val = __builtin_frob_return_addr (val);
  put_reg (udata->retaddr_column, val, udata);
}
/* Given the current frame UDATA and its return address PC, return the
   information about the calling frame in CALLER_UDATA and update the
   register array in SAVED_REGS.  */

static frame_state *
next_stack_level (void *pc, frame_state *udata, frame_state *caller_udata,
                  saved_regs_t *saved_regs)
{
  word_type *p;
  unsigned i;

  /* Collect all of the registers for the current frame.  */
  for (i = 0; i < DWARF_FRAME_REGISTERS; i++)
    if (udata->saved[i])
      saved_regs->reg[i] = get_reg_addr (i, udata, caller_udata);

  caller_udata = __frame_state_for (pc, caller_udata);
  if (! caller_udata)
    return 0;

  /* Now go back to our caller's stack frame.  If our caller's CFA was
     saved in a register in this stack frame or a previous one,
     restore it; otherwise, assume CFA register was saved in SP and
     restore it to our CFA value.  */
  p = saved_regs->reg[caller_udata->cfa_reg];
  if (p)
    caller_udata->cfa = (void *)(ptr_type)*p;
  else
    caller_udata->cfa = udata->cfa;

  if (caller_udata->indirect)
    caller_udata->cfa = * (void **) ((unsigned char *)caller_udata->cfa
                                     + caller_udata->base_offset);
  caller_udata->cfa += caller_udata->cfa_offset;

  return caller_udata;
}
/* Hook to call before __terminate if only cleanup handlers remain.  */

void
__unwinding_cleanup (void)
{
}

/* throw_helper performs some of the common grunt work for a throw.  This
   routine is called by throw and rethrows.  This is pretty much split
   out from the old __throw routine.  An addition has been added which allows
   for a dummy call to a routine __unwinding_cleanup() when there is nothing
   but cleanups remaining.  This allows a debugger to examine the state
   at which the throw was executed, before any cleanups, rather than
   at the terminate point after the stack has been unwound.

   EH is the current eh_context structure.
   PC is the address of the call to __throw.
   MY_UDATA is the unwind information for __throw.
   OFFSET_P is where we return the SP adjustment offset.  */
static void *
throw_helper (struct eh_context *eh, void *pc, frame_state *my_udata,
              long *offset_p)
{
  frame_state ustruct2, *udata = &ustruct2;
  frame_state ustruct;
  frame_state *sub_udata = &ustruct;
  void *saved_pc = pc;
  void *handler;
  void *handler_p = 0;
  void *callee_cfa = 0;
  frame_state saved_ustruct;
  int new_eh_model;
  int cleanup;
  int only_cleanup = 0;
  int rethrow = 0;
  int saved_state = 0;
  long args_size;
  int i;
  saved_regs_t saved_regs, cleanup_regs;
  __eh_info *eh_info = (__eh_info *)eh->info;

  memset (saved_regs.reg, 0, sizeof saved_regs.reg);
  memset (sub_udata->saved, REG_UNSAVED, sizeof sub_udata->saved);

  /* Do we find a handler based on a re-throw PC?  */
  if (eh->table_index != (void *) 0)
    rethrow = 1;

  memcpy (udata, my_udata, sizeof (*udata));

  handler = (void *) 0;
  for (;;)
    {
      frame_state *p = udata;

      udata = next_stack_level (pc, udata, sub_udata, &saved_regs);
      sub_udata = p;

      /* If we couldn't find the next frame, we lose.  */
      if (! udata)
        break;

      if (udata->eh_ptr == NULL)
        new_eh_model = 0;
      else
        new_eh_model = (((exception_descriptor *)(udata->eh_ptr))->
                        runtime_id_field == NEW_EH_RUNTIME);

      if (rethrow)
        {
          rethrow = 0;
          handler = find_exception_handler (eh->table_index, udata->eh_ptr,
                                            eh_info, 1, &cleanup);
          eh->table_index = (void *)0;
        }
      else if (new_eh_model)
        handler = find_exception_handler (pc, udata->eh_ptr, eh_info,
                                          0, &cleanup);
      else
        handler = old_find_exception_handler (pc, udata->eh_ptr);

      /* If we found one, we can stop searching, if its not a cleanup.
         For cleanups, we save the state, and keep looking.  This allows
         us to call a debug hook if there is nothing but cleanups left.  */
      if (handler)
        {
          /* sub_udata now refers to the frame called by the handler frame.  */
          if (cleanup)
            {
              if (! saved_state)
                {
                  saved_ustruct = *udata;
                  cleanup_regs = saved_regs;
                  handler_p = handler;
                  saved_state = 1;
                  only_cleanup = 1;
                  /* Save the CFA of the frame called by the handler
                     frame.  */
                  callee_cfa = sub_udata->cfa;
                }
            }
          else
            {
              only_cleanup = 0;
              callee_cfa = sub_udata->cfa;
              break;
            }
        }

      /* Otherwise, we continue searching.  We subtract 1 from PC to avoid
         hitting the beginning of the next region.  */
      pc = get_return_addr (udata, sub_udata) - 1;
    }

  if (saved_state)
    {
      udata = &saved_ustruct;
      saved_regs = cleanup_regs;
      handler = handler_p;
      if (only_cleanup)
        __unwinding_cleanup ();
    }
  /* If we haven't found a handler by now, this is an unhandled
     exception.  */
  if (! handler)
    __terminate ();

  eh->handler_label = handler;

  args_size = udata->args_size;

  /* We adjust SP by the difference between __throw's CFA and the CFA for
     the frame called by the handler frame, because those CFAs correspond
     to the SP values at the two call sites.  We need to further adjust by
     the args_size of the handler frame itself to get the handler frame's
     SP from before the args were pushed for that call.  */
#ifdef STACK_GROWS_DOWNWARD
  *offset_p = callee_cfa - my_udata->cfa + args_size;
#else
  *offset_p = my_udata->cfa - callee_cfa - args_size;
#endif
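  /* Illustrative numbers (not from the original source): on a
     downward-growing stack, if __throw's CFA is 0xbfff8000, the CFA of the
     frame called by the handler frame is 0xbfffa000, and that frame pushed
     16 bytes of outgoing arguments, then
       *offset_p = 0xbfffa000 - 0xbfff8000 + 16 = 0x2010
     bytes of SP adjustment are handed back for __builtin_eh_return.  */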
  /* If we found a handler in the throw context there's no need to
     unwind.  */
  if (udata->cfa != my_udata->cfa)
    {
      /* Copy saved register values into our register save slots.  */
      for (i = 0; i < DWARF_FRAME_REGISTERS; i++)
        if (i != udata->retaddr_column && saved_regs.reg[i])
          copy_reg (i, saved_regs.reg[i], my_udata);
    }

  return handler;
}

/* We first search for an exception handler, and if we don't find
   it, we call __terminate on the current stack frame so that we may
   use the debugger to walk the stack and understand why no handler
   was found.

   If we find one, then we unwind the frames down to the one that
   has the handler and transfer control into the handler.  */

/*extern void __throw(void) __attribute__ ((__noreturn__));*/
void
__throw (void)
{
  struct eh_context *eh = (*get_eh_context) ();
  void *pc, *handler;
  long offset;

  /* XXX maybe make my_ustruct static so we don't have to look it up for
     each throw.  */
  frame_state my_ustruct, *my_udata = &my_ustruct;

  /* This is required for C++ semantics.  We must call terminate if we
     try and rethrow an exception, when there is no exception currently
     active.  */
  if (! eh->info)
    __terminate ();

  /* Start at our stack frame.  */
label:
  my_udata = __frame_state_for (&&label, my_udata);

  /* We need to get the value from the CFA register.  */
  my_udata->cfa = __builtin_dwarf_cfa ();

  /* Do any necessary initialization to access arbitrary stack frames.
     On the SPARC, this means flushing the register windows.  */
  __builtin_unwind_init ();

  /* Now reset pc to the right throw point.  */
  pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;

  handler = throw_helper (eh, pc, my_udata, &offset);

  __builtin_eh_return ((void *)eh, offset, handler);

  /* Epilogue: restore the handler frame's register values and return
     to the handler.  */
}
/*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/

void
__rethrow (void *index)
{
  struct eh_context *eh = (*get_eh_context) ();
  void *pc, *handler;
  long offset;

  /* XXX maybe make my_ustruct static so we don't have to look it up for
     each throw.  */
  frame_state my_ustruct, *my_udata = &my_ustruct;

  /* This is required for C++ semantics.  We must call terminate if we
     try and rethrow an exception, when there is no exception currently
     active.  */
  if (! eh->info)
    __terminate ();

  /* This is the table index we want to rethrow from.  The value of
     the END_REGION label is used for the PC of the throw, and the
     search begins with the next table entry.  */
  eh->table_index = index;

  /* Start at our stack frame.  */
label:
  my_udata = __frame_state_for (&&label, my_udata);

  /* We need to get the value from the CFA register.  */
  my_udata->cfa = __builtin_dwarf_cfa ();

  /* Do any necessary initialization to access arbitrary stack frames.
     On the SPARC, this means flushing the register windows.  */
  __builtin_unwind_init ();

  /* Now reset pc to the right throw point.  */
  pc = __builtin_extract_return_addr (__builtin_return_address (0)) - 1;

  handler = throw_helper (eh, pc, my_udata, &offset);

  __builtin_eh_return ((void *)eh, offset, handler);

  /* Epilogue: restore the handler frame's register values and return
     to the handler.  */
}

#endif /* DWARF2_UNWIND_INFO */
#ifdef IA64_UNWIND_INFO

/* Return handler to which we want to transfer control, NULL if we don't
   intend to handle this exception here.  */

void *
__ia64_personality_v1 (void *pc, old_exception_table *table)
{
  if (table)
    {
      int pos;
      int best = -1;

      for (pos = 0; table[pos].start_region != (void *) -1; ++pos)
        {
          if (table[pos].start_region <= pc && table[pos].end_region > pc)
            {
              /* This can apply.  Make sure it is at least as small as
                 the previous best.  */
              if (best == -1 || (table[pos].end_region <= table[best].end_region
                        && table[pos].start_region >= table[best].start_region))
                best = pos;
            }
          /* It is sorted by starting PC within a function.  */
          else if (best >= 0 && table[pos].start_region > pc)
            break;
        }
      if (best != -1)
        return table[best].exception_handler;
    }

  return (void *) 0;
}
static void
ia64_throw_helper (ia64_frame_state *throw_frame, ia64_frame_state *caller,
                   void *throw_bsp, void *throw_sp)
{
  void *throw_pc = __builtin_return_address (0);
  unwind_info_ptr *info;
  void *pc, *handler = NULL;
  void *pc_base;
  void *bsp;
  old_exception_table *eh_table;
  int frame_count;

  __builtin_ia64_flushrs ();	/* Make the local register stacks available.  */

  /* Start at our stack frame, get our state.  */
  __build_ia64_frame_state (throw_pc, throw_frame, throw_bsp, throw_sp,
                            &pc_base);

  /* Now we have to find the proper frame for pc, and see if there
     is a handler for it.  if not, we keep going back frames until
     we do find one.  Otherwise we call uncaught ().  */

  frame_count = 0;
  memcpy (caller, throw_frame, sizeof (*caller));
  while (! handler)
    {
      void *(*personality) (void *, old_exception_table *);

      frame_count++;

      /* We only care about the RP right now, so we don't need to keep
         any other information about a call frame right now.  */
      pc = __get_real_reg_value (&caller->rp) - 1;
      bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
                               caller->my_bsp);
      info = __build_ia64_frame_state (pc, caller, bsp, caller->my_psp,
                                       &pc_base);

      /* If we couldn't find the next frame, we lose.  */
      if (! info)
        __terminate ();

      personality = __get_personality (info);
      /* TODO Haven't figured out how to actually load the personality address
         yet, so just always default to the one we expect for now.  */
      if (personality != 0)
        personality = __ia64_personality_v1;
      eh_table = __get_except_table (info);
      /* If there is no personality routine, we'll keep unwinding.  */
      if (personality && eh_table)
        /* Pass a segment relative PC address to the personality routine,
           because the unwind_info section uses segrel relocs.  */
        handler = personality ((void *)(pc - pc_base), eh_table);
    }

  /* Handler is a segment relative address, so we must adjust it here.  */
  handler += (long) pc_base;

  /* If we found a handler, we need to unwind the stack to that point.
     We do this by copying saved values from previous frames into the
     save slot for the throw_frame saved slots.  when __throw returns,
     it'll pickup the correct values.  */

  /* Start with where __throw saved things, and copy each saved register
     of each previous frame until we get to the one before we're
     throwing back to.  */
  memcpy (caller, throw_frame, sizeof (*caller));
  for ( ; frame_count > 0; frame_count--)
    {
      pc = __get_real_reg_value (&caller->rp) - 1;
      bsp = __calc_caller_bsp ((long)__get_real_reg_value (&caller->pfs),
                               caller->my_bsp);
      __build_ia64_frame_state (pc, caller, bsp, caller->my_psp, &pc_base);
      /* Any regs that were saved can be put in the throw frame now.  */
      /* We don't want to copy any saved register from the
         target destination, but we do want to load up its frame.  */
      if (frame_count > 1)
        __copy_saved_reg_state (throw_frame, caller);
    }

  /* Set return address of the throw frame to the handler.  */
  __set_real_reg_value (&throw_frame->rp, handler);

  /* TODO, do we need to do anything to make the values we wrote 'stick'?  */
  /* Do we need to go through the whole loadrs sequence?  */
}
void
__throw (void)
{
  register void *stack_pointer __asm__("r12");
  struct eh_context *eh = (*get_eh_context) ();
  ia64_frame_state my_frame;
  ia64_frame_state originator;	/* For the context the handler is in.  */
  void *bsp, *tmp_bsp;
  long offset;

  /* This is required for C++ semantics.  We must call terminate if we
     try and rethrow an exception, when there is no exception currently
     active.  */
  if (! eh->info)
    __terminate ();

  __builtin_unwind_init ();

  /* We have to call another routine to actually process the frame
     information, which will force all of __throw's local registers into
     backing store.  */

  /* Get the value of ar.bsp while we're here.  */

  bsp = __builtin_ia64_bsp ();
  ia64_throw_helper (&my_frame, &originator, bsp, stack_pointer);

  /* Now we have to fudge the bsp by the amount in our (__throw)
     frame marker, since the return is going to adjust it by that much.  */

  tmp_bsp = __calc_caller_bsp ((long)__get_real_reg_value (&my_frame.pfs),
                               my_frame.my_bsp);
  offset = (char *)my_frame.my_bsp - (char *)tmp_bsp;
  tmp_bsp = (char *)originator.my_bsp + offset;

  __builtin_eh_return (tmp_bsp, offset, originator.my_sp);

  /* The return address was already set by throw_helper.  */
}

#endif /* IA64_UNWIND_INFO */