1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001 Free Software Foundation, Inc.
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
22 GNU CC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with GNU CC; see the file COPYING. If not, write to
29 the Free Software Foundation, 59 Temple Place - Suite 330,
30 Boston, MA 02111-1307, USA. */
/* It is incorrect to include config.h here, because this file is being
   compiled for the target, and hence definitions concerning only the host
   do not apply.  */
41 /* Don't use `fancy_abort' here even if config.h says to use it. */
48 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
49 #if defined (L_divdi3) || defined (L_moddi3)
61 w
.s
.high
= -uu
.s
.high
- ((UWtype
) w
.s
.low
> 0);
69 __addvsi3 (Wtype a
, Wtype b
)
75 if (b
>= 0 ? w
< a
: w
> a
)
84 __addvdi3 (DWtype a
, DWtype b
)
90 if (b
>= 0 ? w
< a
: w
> a
)
99 __subvsi3 (Wtype a
, Wtype b
)
102 return __addvsi3 (a
, (-b
));
108 if (b
>= 0 ? w
> a
: w
< a
)
118 __subvdi3 (DWtype a
, DWtype b
)
127 if (b
>= 0 ? w
> a
: w
< a
)
137 __mulvsi3 (Wtype a
, Wtype b
)
143 if (((a
>= 0) == (b
>= 0)) ? w
< 0 : w
> 0)
158 if (a
>= 0 ? w
> 0 : w
< 0)
173 if (a
>= 0 ? w
> 0 : w
< 0)
222 __mulvdi3 (DWtype u
, DWtype v
)
228 if (((u
>= 0) == (v
>= 0)) ? w
< 0 : w
> 0)
/* Unless shift functions are defined with full ANSI prototypes,
   parameter b will be promoted to int if word_type is smaller than an int.  */
240 __lshrdi3 (DWtype u
, word_type b
)
251 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
255 w
.s
.low
= (UWtype
) uu
.s
.high
>> -bm
;
259 UWtype carries
= (UWtype
) uu
.s
.high
<< bm
;
261 w
.s
.high
= (UWtype
) uu
.s
.high
>> b
;
262 w
.s
.low
= ((UWtype
) uu
.s
.low
>> b
) | carries
;
271 __ashldi3 (DWtype u
, word_type b
)
282 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
286 w
.s
.high
= (UWtype
) uu
.s
.low
<< -bm
;
290 UWtype carries
= (UWtype
) uu
.s
.low
>> bm
;
292 w
.s
.low
= (UWtype
) uu
.s
.low
<< b
;
293 w
.s
.high
= ((UWtype
) uu
.s
.high
<< b
) | carries
;
302 __ashrdi3 (DWtype u
, word_type b
)
313 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
316 /* w.s.high = 1..1 or 0..0 */
317 w
.s
.high
= uu
.s
.high
>> (sizeof (Wtype
) * BITS_PER_UNIT
- 1);
318 w
.s
.low
= uu
.s
.high
>> -bm
;
322 UWtype carries
= (UWtype
) uu
.s
.high
<< bm
;
324 w
.s
.high
= uu
.s
.high
>> b
;
325 w
.s
.low
= ((UWtype
) uu
.s
.low
>> b
) | carries
;
337 UWtype word
, count
, add
;
341 word
= uu
.s
.low
, add
= 0;
342 else if (uu
.s
.high
!= 0)
343 word
= uu
.s
.high
, add
= BITS_PER_UNIT
* sizeof (Wtype
);
347 count_trailing_zeros (count
, word
);
348 return count
+ add
+ 1;
354 __muldi3 (DWtype u
, DWtype v
)
362 w
.ll
= __umulsidi3 (uu
.s
.low
, vv
.s
.low
);
363 w
.s
.high
+= ((UWtype
) uu
.s
.low
* (UWtype
) vv
.s
.high
364 + (UWtype
) uu
.s
.high
* (UWtype
) vv
.s
.low
);
371 #if defined (sdiv_qrnnd)
373 __udiv_w_sdiv (UWtype
*rp
, UWtype a1
, UWtype a0
, UWtype d
)
380 if (a1
< d
- a1
- (a0
>> (W_TYPE_SIZE
- 1)))
382 /* dividend, divisor, and quotient are nonnegative */
383 sdiv_qrnnd (q
, r
, a1
, a0
, d
);
387 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
388 sub_ddmmss (c1
, c0
, a1
, a0
, d
>> 1, d
<< (W_TYPE_SIZE
- 1));
389 /* Divide (c1*2^32 + c0) by d */
390 sdiv_qrnnd (q
, r
, c1
, c0
, d
);
391 /* Add 2^31 to quotient */
392 q
+= (UWtype
) 1 << (W_TYPE_SIZE
- 1);
397 b1
= d
>> 1; /* d/2, between 2^30 and 2^31 - 1 */
398 c1
= a1
>> 1; /* A/2 */
399 c0
= (a1
<< (W_TYPE_SIZE
- 1)) + (a0
>> 1);
401 if (a1
< b1
) /* A < 2^32*b1, so A/2 < 2^31*b1 */
403 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
405 r
= 2*r
+ (a0
& 1); /* Remainder from A/(2*b1) */
422 else if (c1
< b1
) /* So 2^31 <= (A/2)/b1 < 2^32 */
425 c0
= ~c0
; /* logical NOT */
427 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
429 q
= ~q
; /* (A/2)/b1 */
432 r
= 2*r
+ (a0
& 1); /* A/(2*b1) */
450 else /* Implies c1 = b1 */
451 { /* Hence a1 = d - 1 = 2*b1 - 1 */
469 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
471 __udiv_w_sdiv (UWtype
*rp
__attribute__ ((__unused__
)),
472 UWtype a1
__attribute__ ((__unused__
)),
473 UWtype a0
__attribute__ ((__unused__
)),
474 UWtype d
__attribute__ ((__unused__
)))
481 #if (defined (L_udivdi3) || defined (L_divdi3) || \
482 defined (L_umoddi3) || defined (L_moddi3))
487 const UQItype __clz_tab
[] =
489 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
490 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
491 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
492 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
493 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
494 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
495 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
496 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
502 #if (defined (L_udivdi3) || defined (L_divdi3) || \
503 defined (L_umoddi3) || defined (L_moddi3))
507 __udivmoddi4 (UDWtype n
, UDWtype d
, UDWtype
*rp
)
512 UWtype d0
, d1
, n0
, n1
, n2
;
524 #if !UDIV_NEEDS_NORMALIZATION
531 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
534 /* Remainder in n0. */
541 d0
= 1 / d0
; /* Divide intentionally by zero. */
543 udiv_qrnnd (q1
, n1
, 0, n1
, d0
);
544 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
546 /* Remainder in n0. */
557 #else /* UDIV_NEEDS_NORMALIZATION */
565 count_leading_zeros (bm
, d0
);
569 /* Normalize, i.e. make the most significant bit of the
573 n1
= (n1
<< bm
) | (n0
>> (W_TYPE_SIZE
- bm
));
577 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
580 /* Remainder in n0 >> bm. */
587 d0
= 1 / d0
; /* Divide intentionally by zero. */
589 count_leading_zeros (bm
, d0
);
593 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
594 conclude (the most significant bit of n1 is set) /\ (the
595 leading quotient digit q1 = 1).
597 This special case is necessary, not an optimization.
598 (Shifts counts of W_TYPE_SIZE are undefined.) */
607 b
= W_TYPE_SIZE
- bm
;
611 n1
= (n1
<< bm
) | (n0
>> b
);
614 udiv_qrnnd (q1
, n1
, n2
, n1
, d0
);
619 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
621 /* Remainder in n0 >> bm. */
631 #endif /* UDIV_NEEDS_NORMALIZATION */
642 /* Remainder in n1n0. */
654 count_leading_zeros (bm
, d1
);
657 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
658 conclude (the most significant bit of n1 is set) /\ (the
659 quotient digit q0 = 0 or 1).
661 This special case is necessary, not an optimization. */
663 /* The condition on the next line takes advantage of that
664 n1 >= d1 (true due to program flow). */
665 if (n1
> d1
|| n0
>= d0
)
668 sub_ddmmss (n1
, n0
, n1
, n0
, d1
, d0
);
687 b
= W_TYPE_SIZE
- bm
;
689 d1
= (d1
<< bm
) | (d0
>> b
);
692 n1
= (n1
<< bm
) | (n0
>> b
);
695 udiv_qrnnd (q0
, n1
, n2
, n1
, d1
);
696 umul_ppmm (m1
, m0
, q0
, d0
);
698 if (m1
> n1
|| (m1
== n1
&& m0
> n0
))
701 sub_ddmmss (m1
, m0
, m1
, m0
, d1
, d0
);
706 /* Remainder in (n1n0 - m1m0) >> bm. */
709 sub_ddmmss (n1
, n0
, n1
, n0
, m1
, m0
);
710 rr
.s
.low
= (n1
<< b
) | (n0
>> bm
);
711 rr
.s
.high
= n1
>> bm
;
726 __divdi3 (DWtype u
, DWtype v
)
737 uu
.ll
= __negdi2 (uu
.ll
);
740 vv
.ll
= __negdi2 (vv
.ll
);
742 w
= __udivmoddi4 (uu
.ll
, vv
.ll
, (UDWtype
*) 0);
752 __moddi3 (DWtype u
, DWtype v
)
763 uu
.ll
= __negdi2 (uu
.ll
);
765 vv
.ll
= __negdi2 (vv
.ll
);
767 (void) __udivmoddi4 (uu
.ll
, vv
.ll
, &w
);
777 __umoddi3 (UDWtype u
, UDWtype v
)
781 (void) __udivmoddi4 (u
, v
, &w
);
789 __udivdi3 (UDWtype n
, UDWtype d
)
791 return __udivmoddi4 (n
, d
, (UDWtype
*) 0);
797 __cmpdi2 (DWtype a
, DWtype b
)
801 au
.ll
= a
, bu
.ll
= b
;
803 if (au
.s
.high
< bu
.s
.high
)
805 else if (au
.s
.high
> bu
.s
.high
)
807 if ((UWtype
) au
.s
.low
< (UWtype
) bu
.s
.low
)
809 else if ((UWtype
) au
.s
.low
> (UWtype
) bu
.s
.low
)
817 __ucmpdi2 (DWtype a
, DWtype b
)
821 au
.ll
= a
, bu
.ll
= b
;
823 if ((UWtype
) au
.s
.high
< (UWtype
) bu
.s
.high
)
825 else if ((UWtype
) au
.s
.high
> (UWtype
) bu
.s
.high
)
827 if ((UWtype
) au
.s
.low
< (UWtype
) bu
.s
.low
)
829 else if ((UWtype
) au
.s
.low
> (UWtype
) bu
.s
.low
)
835 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
836 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
837 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
840 __fixunstfDI (TFtype a
)
848 /* Compute high word of result, as a flonum. */
849 b
= (a
/ HIGH_WORD_COEFF
);
850 /* Convert that to fixed (but not to DWtype!),
851 and shift it into the high word. */
854 /* Remove high part from the TFtype, leaving the low part as flonum. */
856 /* Convert that to fixed (but not to DWtype!) and add it in.
857 Sometimes A comes out negative. This is significant, since
858 A has more bits than a long int does. */
867 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
872 return - __fixunstfDI (-a
);
873 return __fixunstfDI (a
);
877 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
878 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
879 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
882 __fixunsxfDI (XFtype a
)
890 /* Compute high word of result, as a flonum. */
891 b
= (a
/ HIGH_WORD_COEFF
);
892 /* Convert that to fixed (but not to DWtype!),
893 and shift it into the high word. */
896 /* Remove high part from the XFtype, leaving the low part as flonum. */
898 /* Convert that to fixed (but not to DWtype!) and add it in.
899 Sometimes A comes out negative. This is significant, since
900 A has more bits than a long int does. */
909 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
914 return - __fixunsxfDI (-a
);
915 return __fixunsxfDI (a
);
920 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
921 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
924 __fixunsdfDI (DFtype a
)
932 /* Compute high word of result, as a flonum. */
933 b
= (a
/ HIGH_WORD_COEFF
);
934 /* Convert that to fixed (but not to DWtype!),
935 and shift it into the high word. */
938 /* Remove high part from the DFtype, leaving the low part as flonum. */
940 /* Convert that to fixed (but not to DWtype!) and add it in.
941 Sometimes A comes out negative. This is significant, since
942 A has more bits than a long int does. */
956 return - __fixunsdfDI (-a
);
957 return __fixunsdfDI (a
);
962 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
963 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
966 __fixunssfDI (SFtype original_a
)
968 /* Convert the SFtype to a DFtype, because that is surely not going
969 to lose any bits. Some day someone else can write a faster version
970 that avoids converting to DFtype, and verify it really works right. */
971 DFtype a
= original_a
;
978 /* Compute high word of result, as a flonum. */
979 b
= (a
/ HIGH_WORD_COEFF
);
980 /* Convert that to fixed (but not to DWtype!),
981 and shift it into the high word. */
984 /* Remove high part from the DFtype, leaving the low part as flonum. */
986 /* Convert that to fixed (but not to DWtype!) and add it in.
987 Sometimes A comes out negative. This is significant, since
988 A has more bits than a long int does. */
1002 return - __fixunssfDI (-a
);
1003 return __fixunssfDI (a
);
1007 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
1008 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1009 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1010 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1013 __floatdixf (DWtype u
)
1017 d
= (Wtype
) (u
>> WORD_SIZE
);
1018 d
*= HIGH_HALFWORD_COEFF
;
1019 d
*= HIGH_HALFWORD_COEFF
;
1020 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
1026 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
1027 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1028 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1029 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1032 __floatditf (DWtype u
)
1036 d
= (Wtype
) (u
>> WORD_SIZE
);
1037 d
*= HIGH_HALFWORD_COEFF
;
1038 d
*= HIGH_HALFWORD_COEFF
;
1039 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
1046 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1047 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1048 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1051 __floatdidf (DWtype u
)
1055 d
= (Wtype
) (u
>> WORD_SIZE
);
1056 d
*= HIGH_HALFWORD_COEFF
;
1057 d
*= HIGH_HALFWORD_COEFF
;
1058 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
1065 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
1066 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
1067 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
1068 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
1070 /* Define codes for all the float formats that we know of. Note
1071 that this is copied from real.h. */
1073 #define UNKNOWN_FLOAT_FORMAT 0
1074 #define IEEE_FLOAT_FORMAT 1
1075 #define VAX_FLOAT_FORMAT 2
1076 #define IBM_FLOAT_FORMAT 3
1078 /* Default to IEEE float if not specified. Nearly all machines use it. */
1079 #ifndef HOST_FLOAT_FORMAT
1080 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
1083 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1088 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
1093 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
1099 __floatdisf (DWtype u
)
1101 /* Do the calculation in DFmode
1102 so that we don't lose any of the precision of the high word
1103 while multiplying it. */
1106 /* Protect against double-rounding error.
1107 Represent any low-order bits, that might be truncated in DFmode,
1108 by a bit that won't be lost. The bit can go in anywhere below the
1109 rounding position of the SFmode. A fixed mask and bit position
1110 handles all usual configurations. It doesn't handle the case
1111 of 128-bit DImode, however. */
1112 if (DF_SIZE
< DI_SIZE
1113 && DF_SIZE
> (DI_SIZE
- DF_SIZE
+ SF_SIZE
))
1115 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
1116 if (! (- ((DWtype
) 1 << DF_SIZE
) < u
1117 && u
< ((DWtype
) 1 << DF_SIZE
)))
1119 if ((UDWtype
) u
& (REP_BIT
- 1))
1123 f
= (Wtype
) (u
>> WORD_SIZE
);
1124 f
*= HIGH_HALFWORD_COEFF
;
1125 f
*= HIGH_HALFWORD_COEFF
;
1126 f
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
1132 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
1133 /* Reenable the normal types, in case limits.h needs them. */
1146 __fixunsxfSI (XFtype a
)
1148 if (a
>= - (DFtype
) Wtype_MIN
)
1149 return (Wtype
) (a
+ Wtype_MIN
) - Wtype_MIN
;
1155 /* Reenable the normal types, in case limits.h needs them. */
1168 __fixunsdfSI (DFtype a
)
1170 if (a
>= - (DFtype
) Wtype_MIN
)
1171 return (Wtype
) (a
+ Wtype_MIN
) - Wtype_MIN
;
1177 /* Reenable the normal types, in case limits.h needs them. */
1190 __fixunssfSI (SFtype a
)
1192 if (a
>= - (SFtype
) Wtype_MIN
)
1193 return (Wtype
) (a
+ Wtype_MIN
) - Wtype_MIN
;
1198 /* From here on down, the routines use normal data types. */
1200 #define SItype bogus_type
1201 #define USItype bogus_type
1202 #define DItype bogus_type
1203 #define UDItype bogus_type
1204 #define SFtype bogus_type
1205 #define DFtype bogus_type
1223 /* Like bcmp except the sign is meaningful.
1224 Result is negative if S1 is less than S2,
1225 positive if S1 is greater, 0 if S1 and S2 are equal. */
/* Like bcmp except the sign is meaningful.
   Result is negative if S1 is less than S2,
   positive if S1 is greater, 0 if S1 and S2 are equal.  */
int
__gcc_bcmp (const unsigned char *s1, const unsigned char *s2, size_t size)
{
  while (size > 0)
    {
      unsigned char c1 = *s1++, c2 = *s2++;
      if (c1 != c2)
	return c1 - c2;
      size--;
    }
  return 0;
}
1249 #if defined(__svr4__) || defined(__alliant__)
1253 /* The Alliant needs the added underscore. */
1254 asm (".globl __builtin_saveregs");
1255 asm ("__builtin_saveregs:");
1256 asm (".globl ___builtin_saveregs");
1257 asm ("___builtin_saveregs:");
1259 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1260 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1261 area and also for a new va_list
1263 /* Save all argument registers in the arg reg save area. The
1264 arg reg save area must have the following layout (according
1276 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1277 asm (" fst.q %f12,16(%sp)");
1279 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1280 asm (" st.l %r17,36(%sp)");
1281 asm (" st.l %r18,40(%sp)");
1282 asm (" st.l %r19,44(%sp)");
1283 asm (" st.l %r20,48(%sp)");
1284 asm (" st.l %r21,52(%sp)");
1285 asm (" st.l %r22,56(%sp)");
1286 asm (" st.l %r23,60(%sp)");
1287 asm (" st.l %r24,64(%sp)");
1288 asm (" st.l %r25,68(%sp)");
1289 asm (" st.l %r26,72(%sp)");
1290 asm (" st.l %r27,76(%sp)");
1292 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1293 va_list structure. Put in into
1294 r16 so that it will be returned
1297 /* Initialize all fields of the new va_list structure. This
1298 structure looks like:
1301 unsigned long ireg_used;
1302 unsigned long freg_used;
1308 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1309 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1310 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1311 asm (" bri %r1"); /* delayed return */
1312 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1314 #else /* not __svr4__ */
1315 #if defined(__PARAGON__)
1317 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1318 * and we stand a better chance of hooking into libraries
1319 * compiled by PGI. [andyp@ssd.intel.com]
1323 asm (".globl __builtin_saveregs");
1324 asm ("__builtin_saveregs:");
1325 asm (".globl ___builtin_saveregs");
1326 asm ("___builtin_saveregs:");
1328 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1329 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1330 area and also for a new va_list
1332 /* Save all argument registers in the arg reg save area. The
1333 arg reg save area must have the following layout (according
1345 asm (" fst.q f8, 0(sp)");
1346 asm (" fst.q f12,16(sp)");
1347 asm (" st.l r16,32(sp)");
1348 asm (" st.l r17,36(sp)");
1349 asm (" st.l r18,40(sp)");
1350 asm (" st.l r19,44(sp)");
1351 asm (" st.l r20,48(sp)");
1352 asm (" st.l r21,52(sp)");
1353 asm (" st.l r22,56(sp)");
1354 asm (" st.l r23,60(sp)");
1355 asm (" st.l r24,64(sp)");
1356 asm (" st.l r25,68(sp)");
1357 asm (" st.l r26,72(sp)");
1358 asm (" st.l r27,76(sp)");
1360 asm (" adds 80,sp,r16"); /* compute the address of the new
1361 va_list structure. Put in into
1362 r16 so that it will be returned
1365 /* Initialize all fields of the new va_list structure. This
1366 structure looks like:
1369 unsigned long ireg_used;
1370 unsigned long freg_used;
1376 asm (" st.l r0, 0(r16)"); /* nfixed */
1377 asm (" st.l r0, 4(r16)"); /* nfloating */
1378 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1379 asm (" bri r1"); /* delayed return */
1380 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1381 #else /* not __PARAGON__ */
1385 asm (".globl ___builtin_saveregs");
1386 asm ("___builtin_saveregs:");
1387 asm (" mov sp,r30");
1388 asm (" andnot 0x0f,sp,sp");
1389 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1391 /* Fill in the __va_struct. */
1392 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1393 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1394 asm (" st.l r18, 8(sp)");
1395 asm (" st.l r19,12(sp)");
1396 asm (" st.l r20,16(sp)");
1397 asm (" st.l r21,20(sp)");
1398 asm (" st.l r22,24(sp)");
1399 asm (" st.l r23,28(sp)");
1400 asm (" st.l r24,32(sp)");
1401 asm (" st.l r25,36(sp)");
1402 asm (" st.l r26,40(sp)");
1403 asm (" st.l r27,44(sp)");
1405 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1406 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1408 /* Fill in the __va_ctl. */
1409 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1410 asm (" st.l r28,84(sp)"); /* pointer to more args */
1411 asm (" st.l r0, 88(sp)"); /* nfixed */
1412 asm (" st.l r0, 92(sp)"); /* nfloating */
1414 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1416 asm (" mov r30,sp");
1417 /* recover stack and pass address to start
1419 #endif /* not __PARAGON__ */
1420 #endif /* not __svr4__ */
1421 #else /* not __i860__ */
1423 asm (".global __builtin_saveregs");
1424 asm ("__builtin_saveregs:");
1425 asm (".global ___builtin_saveregs");
1426 asm ("___builtin_saveregs:");
1427 #ifdef NEED_PROC_COMMAND
1430 asm ("st %i0,[%fp+68]");
1431 asm ("st %i1,[%fp+72]");
1432 asm ("st %i2,[%fp+76]");
1433 asm ("st %i3,[%fp+80]");
1434 asm ("st %i4,[%fp+84]");
1436 asm ("st %i5,[%fp+88]");
1437 #ifdef NEED_TYPE_COMMAND
1438 asm (".type __builtin_saveregs,#function");
1439 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1441 #else /* not __sparc__ */
1442 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1446 asm (" .set nomips16");
1448 asm (" .ent __builtin_saveregs");
1449 asm (" .globl __builtin_saveregs");
1450 asm ("__builtin_saveregs:");
1451 asm (" sw $4,0($30)");
1452 asm (" sw $5,4($30)");
1453 asm (" sw $6,8($30)");
1454 asm (" sw $7,12($30)");
1456 asm (" .end __builtin_saveregs");
1457 #else /* not __mips__, etc. */
1459 void * ATTRIBUTE_NORETURN
1460 __builtin_saveregs ()
1465 #endif /* not __mips__ */
1466 #endif /* not __sparc__ */
1467 #endif /* not __i860__ */
1471 #ifndef inhibit_libc
1473 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1475 /* This is used by the `assert' macro. */
/* This is used by the `assert' macro: print the failure message
   (STRING is the format, taking EXPRESSION, LINE and FILENAME),
   flush, and abort.  */
void
__eprintf (const char *string, const char *expression,
	   unsigned int line, const char *filename)
{
  fprintf (stderr, string, expression, line, filename);
  fflush (stderr);
  abort ();
}
/* Structure emitted by -a  */
struct bb
{
  long zero_word;		/* 0 until registered via __bb_init_func */
  const char *filename;
  long *counts;			/* per-basic-block execution counts */
  long ncounts;
  struct bb *next;
  const unsigned long *addresses;

  /* Older GCC's did not emit these fields.  */
  long nwords;
  const char **functions;
  const long *line_nums;
  const char **filenames;
  char *flags;
};
1508 #ifdef BLOCK_PROFILER_CODE
1511 #ifndef inhibit_libc
1513 /* Simple minded basic block profiling output dumper for
1514 systems that don't provide tcov support. At present,
1515 it requires atexit and stdio. */
1517 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1519 char *ctime
PARAMS ((const time_t *));
1521 #include "gbl-ctors.h"
1522 #include "gcov-io.h"
1524 #ifdef TARGET_HAS_F_SETLKW
/* Head of the linked list of registered `struct bb' records.  */
static struct bb *bb_head;
static int num_digits (long value, int base) __attribute__ ((const));

/* Return the number of digits needed to print a value */
/* __inline__ */ static int
num_digits (long value, int base)
{
  /* A minus sign counts as a digit, except in hex output.  */
  int minus = (value < 0 && base != 16);
  unsigned long v = (minus) ? -value : value;
  int ret = minus;

  do
    {
      v /= base;
      ret++;
    }
  while (v);

  return ret;
}
1551 __bb_exit_func (void)
1553 FILE *da_file
, *file
;
1560 i
= strlen (bb_head
->filename
) - 3;
1562 if (!strcmp (bb_head
->filename
+i
, ".da"))
1564 /* Must be -fprofile-arcs not -a.
1565 Dump data in a form that gcov expects. */
1569 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1573 /* Make sure the output file exists -
1574 but don't clobber exiting data. */
1575 if ((da_file
= fopen (ptr
->filename
, "a")) != 0)
1578 /* Need to re-open in order to be able to write from the start. */
1579 da_file
= fopen (ptr
->filename
, "r+b");
1580 /* Some old systems might not allow the 'b' mode modifier.
1581 Therefore, try to open without it. This can lead to a race
1582 condition so that when you delete and re-create the file, the
1583 file might be opened in text mode, but then, you shouldn't
1584 delete the file in the first place. */
1586 da_file
= fopen (ptr
->filename
, "r+");
1589 fprintf (stderr
, "arc profiling: Can't open output file %s.\n",
	  /* After a fork, another process might try to read and/or write
	     the same file simultaneously.  So if we can, lock the file to
	     avoid race conditions.  */
1597 #if defined (TARGET_HAS_F_SETLKW)
1599 struct flock s_flock
;
1601 s_flock
.l_type
= F_WRLCK
;
1602 s_flock
.l_whence
= SEEK_SET
;
1603 s_flock
.l_start
= 0;
1605 s_flock
.l_pid
= getpid ();
1607 while (fcntl (fileno (da_file
), F_SETLKW
, &s_flock
)
1612 /* If the file is not empty, and the number of counts in it is the
1613 same, then merge them in. */
1614 firstchar
= fgetc (da_file
);
1615 if (firstchar
== EOF
)
1617 if (ferror (da_file
))
1619 fprintf (stderr
, "arc profiling: Can't read output file ");
1620 perror (ptr
->filename
);
1627 if (ungetc (firstchar
, da_file
) == EOF
)
1629 if (__read_long (&n_counts
, da_file
, 8) != 0)
1631 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1636 if (n_counts
== ptr
->ncounts
)
1640 for (i
= 0; i
< n_counts
; i
++)
1644 if (__read_long (&v
, da_file
, 8) != 0)
1646 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1650 ptr
->counts
[i
] += v
;
1658 /* ??? Should first write a header to the file. Preferably, a 4 byte
1659 magic number, 4 bytes containing the time the program was
1660 compiled, 4 bytes containing the last modification time of the
1661 source file, and 4 bytes indicating the compiler options used.
1663 That way we can easily verify that the proper source/executable/
1664 data file combination is being used from gcov. */
1666 if (__write_long (ptr
->ncounts
, da_file
, 8) != 0)
1669 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1675 long *count_ptr
= ptr
->counts
;
1677 for (j
= ptr
->ncounts
; j
> 0; j
--)
1679 if (__write_long (*count_ptr
, da_file
, 8) != 0)
1687 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1691 if (fclose (da_file
) == EOF
)
1692 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1699 /* Must be basic block profiling. Emit a human readable output file. */
1701 file
= fopen ("bb.out", "a");
1710 /* This is somewhat type incorrect, but it avoids worrying about
1711 exactly where time.h is included from. It should be ok unless
1712 a void * differs from other pointer formats, or if sizeof (long)
1713 is < sizeof (time_t). It would be nice if we could assume the
1714 use of rationale standards here. */
1716 time ((void *) &time_value
);
1717 fprintf (file
, "Basic block profiling finished on %s\n", ctime ((void *) &time_value
));
1719 /* We check the length field explicitly in order to allow compatibility
1720 with older GCC's which did not provide it. */
1722 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1725 int func_p
= (ptr
->nwords
>= (long) sizeof (struct bb
)
1726 && ptr
->nwords
<= 1000
1728 int line_p
= (func_p
&& ptr
->line_nums
);
1729 int file_p
= (func_p
&& ptr
->filenames
);
1730 int addr_p
= (ptr
->addresses
!= 0);
1731 long ncounts
= ptr
->ncounts
;
1737 int blk_len
= num_digits (ncounts
, 10);
1742 fprintf (file
, "File %s, %ld basic blocks \n\n",
1743 ptr
->filename
, ncounts
);
1745 /* Get max values for each field. */
1746 for (i
= 0; i
< ncounts
; i
++)
1751 if (cnt_max
< ptr
->counts
[i
])
1752 cnt_max
= ptr
->counts
[i
];
1754 if (addr_p
&& (unsigned long) addr_max
< ptr
->addresses
[i
])
1755 addr_max
= ptr
->addresses
[i
];
1757 if (line_p
&& line_max
< ptr
->line_nums
[i
])
1758 line_max
= ptr
->line_nums
[i
];
1762 p
= (ptr
->functions
[i
]) ? (ptr
->functions
[i
]) : "<none>";
1770 p
= (ptr
->filenames
[i
]) ? (ptr
->filenames
[i
]) : "<none>";
1777 addr_len
= num_digits (addr_max
, 16);
1778 cnt_len
= num_digits (cnt_max
, 10);
1779 line_len
= num_digits (line_max
, 10);
1781 /* Now print out the basic block information. */
1782 for (i
= 0; i
< ncounts
; i
++)
1785 " Block #%*d: executed %*ld time(s)",
1787 cnt_len
, ptr
->counts
[i
]);
1790 fprintf (file
, " address= 0x%.*lx", addr_len
,
1794 fprintf (file
, " function= %-*s", func_len
,
1795 (ptr
->functions
[i
]) ? ptr
->functions
[i
] : "<none>");
1798 fprintf (file
, " line= %*ld", line_len
, ptr
->line_nums
[i
]);
1801 fprintf (file
, " file= %s",
1802 (ptr
->filenames
[i
]) ? ptr
->filenames
[i
] : "<none>");
1804 fprintf (file
, "\n");
1807 fprintf (file
, "\n");
1811 fprintf (file
, "\n\n");
1817 __bb_init_func (struct bb
*blocks
)
1819 /* User is supposed to check whether the first word is non-0,
1820 but just in case.... */
1822 if (blocks
->zero_word
)
1825 /* Initialize destructor. */
1827 atexit (__bb_exit_func
);
1829 /* Set up linked list. */
1830 blocks
->zero_word
= 1;
1831 blocks
->next
= bb_head
;
1835 /* Called before fork or exec - write out profile information gathered so
1836 far and reset it to zero. This avoids duplication or loss of the
1837 profile information gathered so far. */
1839 __bb_fork_func (void)
1844 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1847 for (i
= ptr
->ncounts
- 1; i
>= 0; i
--)
#ifndef MACHINE_STATE_SAVE
#define MACHINE_STATE_SAVE(ID)
#endif
#ifndef MACHINE_STATE_RESTORE
#define MACHINE_STATE_RESTORE(ID)
#endif

/* Number of buckets in hashtable of basic block addresses.  */

#define BB_BUCKETS 311

/* Maximum length of string in file bb.in.  */

#define BBINBUFSIZE 500

/* One counted (src -> dst) jump, chained per hash bucket.  */
struct bb_edge
{
  struct bb_edge *next;
  unsigned long src_addr;
  unsigned long dst_addr;
  unsigned long count;
};

enum bb_func_mode
{
  TRACE_KEEP = 0, TRACE_ON = 1, TRACE_OFF = 2
};

/* A function named in bb.in and how tracing should treat it.  */
struct bb_func
{
  struct bb_func *next;
  char *funcname;
  char *filename;
  enum bb_func_mode mode;
};

/* This is the connection to the outside world.
   The BLOCK_PROFILER macro must set __bb.blocks
   and __bb.blockno.  */

struct {
  unsigned long blockno;
  struct bb *blocks;
} __bb;

/* Vars to store addrs of source and destination basic blocks
   of a jump.  */

static unsigned long bb_src = 0;
static unsigned long bb_dst = 0;

static FILE *bb_tracefile = (FILE *) 0;
static struct bb_edge **bb_hashbuckets = (struct bb_edge **) 0;
static struct bb_func *bb_func_head = (struct bb_func *) 0;
static unsigned long bb_callcount = 0;
static int bb_mode = 0;

static unsigned long *bb_stack = (unsigned long *) 0;
static size_t bb_stacksize = 0;

static int reported = 0;

/* Trace modes:
Always             : Print execution frequencies of basic blocks
bb_mode & 1 != 0   : Dump trace of basic blocks to file bbtrace[.gz]
bb_mode & 2 != 0   : Print jump frequencies to file bb.out.
bb_mode & 4 != 0   : Cut call instructions from basic block flow.
bb_mode & 8 != 0   : Insert return instructions in basic block flow.
*/
1915 Always : Print execution frequencies of basic blocks
1917 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1918 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1919 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1920 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1925 /*#include <sys/types.h>*/
1926 #include <sys/stat.h>
1927 /*#include <malloc.h>*/
1929 /* Commands executed by gopen. */
1931 #define GOPENDECOMPRESS "gzip -cd "
1932 #define GOPENCOMPRESS "gzip -c >"
/* Like fopen but pipes through gzip.  mode may only be "r" or "w".
   If it does not compile, simply replace gopen by fopen and delete
   '.gz' from any first parameter to gopen.  */

static FILE *
gopen (char *fn, char *mode)
{
#ifdef HAVE_POPEN
  char *p;
  FILE *f;
  int use_gzip;

  if (mode[0] != 'r' && mode[0] != 'w')
    return 0;

  /* Decide from the suffix (.z/.Z/.gz) whether to pipe through gzip.  */
  p = fn + strlen (fn)-1;
  use_gzip = ((p[-1] == '.' && (p[0] == 'Z' || p[0] == 'z'))
	      || (p[-2] == '.' && p[-1] == 'g' && p[0] == 'z'));

  if (use_gzip)
    {
      if (mode[0] == 'r')
	{
	  char *s = (char *) malloc (sizeof (char) * strlen (fn)
				     + sizeof (GOPENDECOMPRESS));
	  strcpy (s, GOPENDECOMPRESS);
	  strcpy (s + (sizeof (GOPENDECOMPRESS)-1), fn);
	  f = popen (s, mode);
	  free (s);
	  return f;
	}
      else
	{
	  char *s = (char *) malloc (sizeof (char) * strlen (fn)
				     + sizeof (GOPENCOMPRESS));
	  strcpy (s, GOPENCOMPRESS);
	  strcpy (s + (sizeof (GOPENCOMPRESS)-1), fn);
	  /* Fall back to a plain file if the pipe cannot be created.  */
	  if (!(f = popen (s, mode)))
	    f = fopen (s, mode);
	  free (s);
	  return f;
	}
    }
  else
    return fopen (fn, mode);
#else
  return fopen (fn, mode);
#endif
}
1993 if (!fstat (fileno (f
), &buf
) && S_ISFIFO (buf
.st_mode
))
2001 #endif /* HAVE_POPEN */
2003 /* Called once per program. */
2006 __bb_exit_trace_func (void)
2008 FILE *file
= fopen ("bb.out", "a");
2021 gclose (bb_tracefile
);
2023 fclose (bb_tracefile
);
2024 #endif /* HAVE_POPEN */
2027 /* Check functions in `bb.in'. */
2032 const struct bb_func
*p
;
2033 int printed_something
= 0;
2037 /* This is somewhat type incorrect. */
2038 time ((void *) &time_value
);
2040 for (p
= bb_func_head
; p
!= (struct bb_func
*) 0; p
= p
->next
)
2042 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
2044 if (!ptr
->filename
|| (p
->filename
!= (char *) 0 && strcmp (p
->filename
, ptr
->filename
)))
2046 for (blk
= 0; blk
< ptr
->ncounts
; blk
++)
2048 if (!strcmp (p
->funcname
, ptr
->functions
[blk
]))
2053 if (!printed_something
)
2055 fprintf (file
, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value
));
2056 printed_something
= 1;
2059 fprintf (file
, "\tFunction %s", p
->funcname
);
2061 fprintf (file
, " of file %s", p
->filename
);
2062 fprintf (file
, "\n" );
2067 if (printed_something
)
2068 fprintf (file
, "\n");
2074 if (!bb_hashbuckets
)
2078 fprintf (stderr
, "Profiler: out of memory\n");
2088 unsigned long addr_max
= 0;
2089 unsigned long cnt_max
= 0;
2093 /* This is somewhat type incorrect, but it avoids worrying about
2094 exactly where time.h is included from. It should be ok unless
2095 a void * differs from other pointer formats, or if sizeof (long)
2096 is < sizeof (time_t). It would be nice if we could assume the
2097 use of rationale standards here. */
2099 time ((void *) &time_value
);
2100 fprintf (file
, "Basic block jump tracing");
2102 switch (bb_mode
& 12)
2105 fprintf (file
, " (with call)");
2109 /* Print nothing. */
2113 fprintf (file
, " (with call & ret)");
2117 fprintf (file
, " (with ret)");
2121 fprintf (file
, " finished on %s\n", ctime ((void *) &time_value
));
2123 for (i
= 0; i
< BB_BUCKETS
; i
++)
2125 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
2126 for ( ; bucket
; bucket
= bucket
->next
)
2128 if (addr_max
< bucket
->src_addr
)
2129 addr_max
= bucket
->src_addr
;
2130 if (addr_max
< bucket
->dst_addr
)
2131 addr_max
= bucket
->dst_addr
;
2132 if (cnt_max
< bucket
->count
)
2133 cnt_max
= bucket
->count
;
2136 addr_len
= num_digits (addr_max
, 16);
2137 cnt_len
= num_digits (cnt_max
, 10);
2139 for ( i
= 0; i
< BB_BUCKETS
; i
++)
2141 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
2142 for ( ; bucket
; bucket
= bucket
->next
)
2145 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
2146 addr_len
, bucket
->src_addr
,
2147 addr_len
, bucket
->dst_addr
,
2148 cnt_len
, bucket
->count
);
2152 fprintf (file
, "\n");
2160 /* Free allocated memory. */
2165 struct bb_func
*old
= f
;
2168 if (old
->funcname
) free (old
->funcname
);
2169 if (old
->filename
) free (old
->filename
);
2180 for (i
= 0; i
< BB_BUCKETS
; i
++)
2182 struct bb_edge
*old
, *bucket
= bb_hashbuckets
[i
];
2187 bucket
= bucket
->next
;
2191 free (bb_hashbuckets
);
2194 for (b
= bb_head
; b
; b
= b
->next
)
2195 if (b
->flags
) free (b
->flags
);
2198 /* Called once per program. */
2201 __bb_init_prg (void)
2204 char buf
[BBINBUFSIZE
];
2207 enum bb_func_mode m
;
2210 /* Initialize destructor. */
2211 atexit (__bb_exit_func
);
2213 if (!(file
= fopen ("bb.in", "r")))
2216 while(fgets (buf
, BBINBUFSIZE
, file
) != 0)
2232 if (!strcmp (p
, "__bb_trace__"))
2234 else if (!strcmp (p
, "__bb_jumps__"))
2236 else if (!strcmp (p
, "__bb_hidecall__"))
2238 else if (!strcmp (p
, "__bb_showret__"))
2242 struct bb_func
*f
= (struct bb_func
*) malloc (sizeof (struct bb_func
));
2246 f
->next
= bb_func_head
;
2247 if ((pos
= strchr (p
, ':')))
2249 if (!(f
->funcname
= (char *) malloc (strlen (pos
+1)+1)))
2251 strcpy (f
->funcname
, pos
+1);
2253 if ((f
->filename
= (char *) malloc (l
+1)))
2255 strncpy (f
->filename
, p
, l
);
2256 f
->filename
[l
] = '\0';
2259 f
->filename
= (char *) 0;
2263 if (!(f
->funcname
= (char *) malloc (strlen (p
)+1)))
2265 strcpy (f
->funcname
, p
);
2266 f
->filename
= (char *) 0;
2278 bb_tracefile
= gopen ("bbtrace.gz", "w");
2283 bb_tracefile
= fopen ("bbtrace", "w");
2285 #endif /* HAVE_POPEN */
2289 bb_hashbuckets
= (struct bb_edge
**)
2290 malloc (BB_BUCKETS
* sizeof (struct bb_edge
*));
2292 /* Use a loop here rather than calling bzero to avoid having to
2293 conditionalize its existance. */
2294 for (i
= 0; i
< BB_BUCKETS
; i
++)
2295 bb_hashbuckets
[i
] = 0;
2301 bb_stack
= (unsigned long *) malloc (bb_stacksize
* sizeof (*bb_stack
));
2304 /* Initialize destructor. */
2305 atexit (__bb_exit_trace_func
);
2308 /* Called upon entering a basic block. */
2311 __bb_trace_func (void)
2313 struct bb_edge
*bucket
;
2315 MACHINE_STATE_SAVE("1")
2317 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2320 bb_dst
= __bb
.blocks
->addresses
[__bb
.blockno
];
2321 __bb
.blocks
->counts
[__bb
.blockno
]++;
2325 fwrite (&bb_dst
, sizeof (unsigned long), 1, bb_tracefile
);
2330 struct bb_edge
**startbucket
, **oldnext
;
2332 oldnext
= startbucket
2333 = & bb_hashbuckets
[ (((int) bb_src
*8) ^ (int) bb_dst
) % BB_BUCKETS
];
2334 bucket
= *startbucket
;
2336 for (bucket
= *startbucket
; bucket
;
2337 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2339 if (bucket
->src_addr
== bb_src
2340 && bucket
->dst_addr
== bb_dst
)
2343 *oldnext
= bucket
->next
;
2344 bucket
->next
= *startbucket
;
2345 *startbucket
= bucket
;
2350 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2356 fprintf (stderr
, "Profiler: out of memory\n");
2363 bucket
->src_addr
= bb_src
;
2364 bucket
->dst_addr
= bb_dst
;
2365 bucket
->next
= *startbucket
;
2366 *startbucket
= bucket
;
2377 MACHINE_STATE_RESTORE("1")
2381 /* Called when returning from a function and `__bb_showret__' is set. */
2384 __bb_trace_func_ret (void)
2386 struct bb_edge
*bucket
;
2388 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2393 struct bb_edge
**startbucket
, **oldnext
;
2395 oldnext
= startbucket
2396 = & bb_hashbuckets
[ (((int) bb_dst
* 8) ^ (int) bb_src
) % BB_BUCKETS
];
2397 bucket
= *startbucket
;
2399 for (bucket
= *startbucket
; bucket
;
2400 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2402 if (bucket
->src_addr
== bb_dst
2403 && bucket
->dst_addr
== bb_src
)
2406 *oldnext
= bucket
->next
;
2407 bucket
->next
= *startbucket
;
2408 *startbucket
= bucket
;
2413 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2419 fprintf (stderr
, "Profiler: out of memory\n");
2426 bucket
->src_addr
= bb_dst
;
2427 bucket
->dst_addr
= bb_src
;
2428 bucket
->next
= *startbucket
;
2429 *startbucket
= bucket
;
2442 /* Called upon entering the first function of a file. */
2445 __bb_init_file (struct bb
*blocks
)
2448 const struct bb_func
*p
;
2449 long blk
, ncounts
= blocks
->ncounts
;
2450 const char **functions
= blocks
->functions
;
2452 /* Set up linked list. */
2453 blocks
->zero_word
= 1;
2454 blocks
->next
= bb_head
;
2459 || !(blocks
->flags
= (char *) malloc (sizeof (char) * blocks
->ncounts
)))
2462 for (blk
= 0; blk
< ncounts
; blk
++)
2463 blocks
->flags
[blk
] = 0;
2465 for (blk
= 0; blk
< ncounts
; blk
++)
2467 for (p
= bb_func_head
; p
; p
= p
->next
)
2469 if (!strcmp (p
->funcname
, functions
[blk
])
2470 && (!p
->filename
|| !strcmp (p
->filename
, blocks
->filename
)))
2472 blocks
->flags
[blk
] |= p
->mode
;
2479 /* Called when exiting from a function. */
2482 __bb_trace_ret (void)
2485 MACHINE_STATE_SAVE("2")
2489 if ((bb_mode
& 12) && bb_stacksize
> bb_callcount
)
2491 bb_src
= bb_stack
[bb_callcount
];
2493 __bb_trace_func_ret ();
2499 MACHINE_STATE_RESTORE("2")
2503 /* Called when entering a function. */
2506 __bb_init_trace_func (struct bb
*blocks
, unsigned long blockno
)
2508 static int trace_init
= 0;
2510 MACHINE_STATE_SAVE("3")
2512 if (!blocks
->zero_word
)
2519 __bb_init_file (blocks
);
2529 if (bb_callcount
>= bb_stacksize
)
2531 size_t newsize
= bb_callcount
+ 100;
2533 bb_stack
= (unsigned long *) realloc (bb_stack
, newsize
);
2538 fprintf (stderr
, "Profiler: out of memory\n");
2542 goto stack_overflow
;
2544 bb_stacksize
= newsize
;
2546 bb_stack
[bb_callcount
] = bb_src
;
2557 else if (blocks
->flags
&& (blocks
->flags
[blockno
] & TRACE_ON
))
2563 bb_stack
[bb_callcount
] = bb_src
;
2566 MACHINE_STATE_RESTORE("3")
2569 #endif /* not inhibit_libc */
2570 #endif /* not BLOCK_PROFILER_CODE */
2573 #ifdef L_clear_cache
2574 /* Clear part of an instruction cache. */
2576 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2579 __clear_cache (char *beg
__attribute__((__unused__
)),
2580 char *end
__attribute__((__unused__
)))
2582 #ifdef CLEAR_INSN_CACHE
2583 CLEAR_INSN_CACHE (beg
, end
);
2585 #ifdef INSN_CACHE_SIZE
2586 static char array
[INSN_CACHE_SIZE
+ INSN_CACHE_PLANE_SIZE
+ INSN_CACHE_LINE_WIDTH
];
2587 static int initialized
;
2591 typedef (*function_ptr
) (void);
2593 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2594 /* It's cheaper to clear the whole cache.
2595 Put in a series of jump instructions so that calling the beginning
2596 of the cache will clear the whole thing. */
2600 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2601 & -INSN_CACHE_LINE_WIDTH
);
2602 int end_ptr
= ptr
+ INSN_CACHE_SIZE
;
2604 while (ptr
< end_ptr
)
2606 *(INSTRUCTION_TYPE
*)ptr
2607 = JUMP_AHEAD_INSTRUCTION
+ INSN_CACHE_LINE_WIDTH
;
2608 ptr
+= INSN_CACHE_LINE_WIDTH
;
2610 *(INSTRUCTION_TYPE
*) (ptr
- INSN_CACHE_LINE_WIDTH
) = RETURN_INSTRUCTION
;
2615 /* Call the beginning of the sequence. */
2616 (((function_ptr
) (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2617 & -INSN_CACHE_LINE_WIDTH
))
2620 #else /* Cache is large. */
2624 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2625 & -INSN_CACHE_LINE_WIDTH
);
2627 while (ptr
< (int) array
+ sizeof array
)
2629 *(INSTRUCTION_TYPE
*)ptr
= RETURN_INSTRUCTION
;
2630 ptr
+= INSN_CACHE_LINE_WIDTH
;
2636 /* Find the location in array that occupies the same cache line as BEG. */
2638 offset
= ((int) beg
& -INSN_CACHE_LINE_WIDTH
) & (INSN_CACHE_PLANE_SIZE
- 1);
2639 start_addr
= (((int) (array
+ INSN_CACHE_PLANE_SIZE
- 1)
2640 & -INSN_CACHE_PLANE_SIZE
)
2643 /* Compute the cache alignment of the place to stop clearing. */
2644 #if 0 /* This is not needed for gcc's purposes. */
2645 /* If the block to clear is bigger than a cache plane,
2646 we clear the entire cache, and OFFSET is already correct. */
2647 if (end
< beg
+ INSN_CACHE_PLANE_SIZE
)
2649 offset
= (((int) (end
+ INSN_CACHE_LINE_WIDTH
- 1)
2650 & -INSN_CACHE_LINE_WIDTH
)
2651 & (INSN_CACHE_PLANE_SIZE
- 1));
2653 #if INSN_CACHE_DEPTH > 1
2654 end_addr
= (start_addr
& -INSN_CACHE_PLANE_SIZE
) + offset
;
2655 if (end_addr
<= start_addr
)
2656 end_addr
+= INSN_CACHE_PLANE_SIZE
;
2658 for (plane
= 0; plane
< INSN_CACHE_DEPTH
; plane
++)
2660 int addr
= start_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2661 int stop
= end_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2663 while (addr
!= stop
)
2665 /* Call the return instruction at ADDR. */
2666 ((function_ptr
) addr
) ();
2668 addr
+= INSN_CACHE_LINE_WIDTH
;
2671 #else /* just one plane */
2674 /* Call the return instruction at START_ADDR. */
2675 ((function_ptr
) start_addr
) ();
2677 start_addr
+= INSN_CACHE_LINE_WIDTH
;
2679 while ((start_addr
% INSN_CACHE_SIZE
) != offset
);
2680 #endif /* just one plane */
2681 #endif /* Cache is large */
2682 #endif /* Cache exists */
2683 #endif /* CLEAR_INSN_CACHE */
2686 #endif /* L_clear_cache */
2690 /* Jump to a trampoline, loading the static chain address. */
2692 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2705 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2709 mprotect (char *addr
, int len
, int prot
)
2726 if (VirtualProtect (addr
, len
, np
, &op
))
2732 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2734 #ifdef TRANSFER_FROM_TRAMPOLINE
2735 TRANSFER_FROM_TRAMPOLINE
2738 #if defined (NeXT) && defined (__MACH__)
2740 /* Make stack executable so we can call trampolines on stack.
2741 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2745 #include <mach/mach.h>
2749 __enable_execute_stack (char *addr
)
2752 char *eaddr
= addr
+ TRAMPOLINE_SIZE
;
2753 vm_address_t a
= (vm_address_t
) addr
;
2755 /* turn on execute access on stack */
2756 r
= vm_protect (task_self (), a
, TRAMPOLINE_SIZE
, FALSE
, VM_PROT_ALL
);
2757 if (r
!= KERN_SUCCESS
)
2759 mach_error("vm_protect VM_PROT_ALL", r
);
2763 /* We inline the i-cache invalidation for speed */
2765 #ifdef CLEAR_INSN_CACHE
2766 CLEAR_INSN_CACHE (addr
, eaddr
);
2768 __clear_cache ((int) addr
, (int) eaddr
);
2772 #endif /* defined (NeXT) && defined (__MACH__) */
2776 /* Make stack executable so we can call trampolines on stack.
2777 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2779 #include <sys/mman.h>
2780 #include <sys/vmparam.h>
2781 #include <machine/machparam.h>
2784 __enable_execute_stack (void)
2787 static unsigned lowest
= USRSTACK
;
2788 unsigned current
= (unsigned) &fp
& -NBPG
;
2790 if (lowest
> current
)
2792 unsigned len
= lowest
- current
;
2793 mremap (current
, &len
, PROT_READ
| PROT_WRITE
| PROT_EXEC
, MAP_PRIVATE
);
2797 /* Clear instruction cache in case an old trampoline is in it. */
2800 #endif /* __convex__ */
2804 /* Modified from the convex -code above. */
2806 #include <sys/param.h>
2808 #include <sys/m88kbcs.h>
2811 __enable_execute_stack (void)
2814 static unsigned long lowest
= USRSTACK
;
2815 unsigned long current
= (unsigned long) &save_errno
& -NBPC
;
2817 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2818 address is seen as 'negative'. That is the case with the stack. */
2821 if (lowest
> current
)
2823 unsigned len
=lowest
-current
;
2824 memctl(current
,len
,MCT_TEXT
);
2828 memctl(current
,NBPC
,MCT_TEXT
);
2832 #endif /* __sysV88__ */
2836 #include <sys/signal.h>
2839 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2840 so define it here, because we need it in __clear_insn_cache below */
2841 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2842 hence we enable this stuff only if MCT_TEXT is #define'd. */
2857 /* Clear instruction cache so we can call trampolines on stack.
2858 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2861 __clear_insn_cache (void)
2866 /* Preserve errno, because users would be surprised to have
2867 errno changing without explicitly calling any system-call. */
2870 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2871 No need to use an address derived from _start or %sp, as 0 works also. */
2872 memctl(0, 4096, MCT_TEXT
);
2877 #endif /* __sysV68__ */
2881 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2883 #include <sys/mman.h>
2884 #include <sys/types.h>
2885 #include <sys/param.h>
2886 #include <sys/vmmac.h>
2888 /* Modified from the convex -code above.
2889 mremap promises to clear the i-cache. */
2892 __enable_execute_stack (void)
2895 if (mprotect (((unsigned int)&fp
/PAGSIZ
)*PAGSIZ
, PAGSIZ
,
2896 PROT_READ
|PROT_WRITE
|PROT_EXEC
))
2898 perror ("mprotect in __enable_execute_stack");
2903 #endif /* __pyr__ */
2905 #if defined (sony_news) && defined (SYSTYPE_BSD)
2908 #include <sys/types.h>
2909 #include <sys/param.h>
2910 #include <syscall.h>
2911 #include <machine/sysnews.h>
2913 /* cacheflush function for NEWS-OS 4.2.
2914 This function is called from trampoline-initialize code
2915 defined in config/mips/mips.h. */
2918 cacheflush (char *beg
, int size
, int flag
)
2920 if (syscall (SYS_sysnews
, NEWS_CACHEFLUSH
, beg
, size
, FLUSH_BCACHE
))
2922 perror ("cache_flush");
2928 #endif /* sony_news */
2929 #endif /* L_trampoline */
2934 #include "gbl-ctors.h"
2935 /* Some systems use __main in a way incompatible with its use in gcc, in these
2936 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2937 give the same symbol without quotes for an alternative entry point. You
2938 must define both, or neither. */
2940 #define NAME__MAIN "__main"
2941 #define SYMBOL__MAIN __main
2944 #ifdef INIT_SECTION_ASM_OP
2945 #undef HAS_INIT_SECTION
2946 #define HAS_INIT_SECTION
2949 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2951 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2952 code to run constructors. In that case, we need to handle EH here, too. */
2954 #ifdef EH_FRAME_SECTION
2956 extern unsigned char __EH_FRAME_BEGIN__
[];
2959 /* Run all the global destructors on exit from the program. */
2962 __do_global_dtors (void)
2964 #ifdef DO_GLOBAL_DTORS_BODY
2965 DO_GLOBAL_DTORS_BODY
;
2967 static func_ptr
*p
= __DTOR_LIST__
+ 1;
2974 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
2976 static int completed
= 0;
2980 __deregister_frame_info (__EH_FRAME_BEGIN__
);
2987 #ifndef HAS_INIT_SECTION
2988 /* Run all the global constructors on entry to the program. */
2991 __do_global_ctors (void)
2993 #ifdef EH_FRAME_SECTION
2995 static struct object object
;
2996 __register_frame_info (__EH_FRAME_BEGIN__
, &object
);
2999 DO_GLOBAL_CTORS_BODY
;
3000 atexit (__do_global_dtors
);
3002 #endif /* no HAS_INIT_SECTION */
3004 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
3005 /* Subroutine called automatically by `main'.
3006 Compiling a global function named `main'
3007 produces an automatic call to this function at the beginning.
3009 For many systems, this routine calls __do_global_ctors.
3010 For systems which support a .init section we use the .init section
3011 to run __do_global_ctors, so we need not do anything here. */
3016 /* Support recursive calls to `main': run initializers just once. */
3017 static int initialized
;
3021 __do_global_ctors ();
3024 #endif /* no HAS_INIT_SECTION or INVOKE__main */
3026 #endif /* L__main */
3027 #endif /* __CYGWIN__ */
3031 #include "gbl-ctors.h"
3033 /* Provide default definitions for the lists of constructors and
3034 destructors, so that we don't get linker errors. These symbols are
3035 intentionally bss symbols, so that gld and/or collect will provide
3036 the right values. */
3038 /* We declare the lists here with two elements each,
3039 so that they are valid empty lists if no other definition is loaded.
3041 If we are using the old "set" extensions to have the gnu linker
3042 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
3043 must be in the bss/common section.
3045 Long term no port should use those extensions. But many still do. */
3046 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
3047 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
3048 func_ptr __CTOR_LIST__
[2] = {0, 0};
3049 func_ptr __DTOR_LIST__
[2] = {0, 0};
3051 func_ptr __CTOR_LIST__
[2];
3052 func_ptr __DTOR_LIST__
[2];
3054 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
3055 #endif /* L_ctors */
3059 #include "gbl-ctors.h"
3067 static func_ptr
*atexit_chain
= 0;
3068 static long atexit_chain_length
= 0;
3069 static volatile long last_atexit_chain_slot
= -1;
3072 atexit (func_ptr func
)
3074 if (++last_atexit_chain_slot
== atexit_chain_length
)
3076 atexit_chain_length
+= 32;
3078 atexit_chain
= (func_ptr
*) realloc (atexit_chain
, atexit_chain_length
3079 * sizeof (func_ptr
));
3081 atexit_chain
= (func_ptr
*) malloc (atexit_chain_length
3082 * sizeof (func_ptr
));
3085 atexit_chain_length
= 0;
3086 last_atexit_chain_slot
= -1;
3091 atexit_chain
[last_atexit_chain_slot
] = func
;
3095 extern void _cleanup (void);
3096 extern void _exit (int) __attribute__ ((__noreturn__
));
3103 for ( ; last_atexit_chain_slot
-- >= 0; )
3105 (*atexit_chain
[last_atexit_chain_slot
+ 1]) ();
3106 atexit_chain
[last_atexit_chain_slot
+ 1] = 0;
3108 free (atexit_chain
);
3121 /* Simple; we just need a wrapper for ON_EXIT. */
3123 atexit (func_ptr func
)
3125 return ON_EXIT (func
);
3128 #endif /* ON_EXIT */
3129 #endif /* NEED_ATEXIT */
3137 /* Shared exception handling support routines. */
3140 __default_terminate (void)
3145 static __terminate_func_ptr __terminate_func
=
3146 __default_terminate
;
3148 void __attribute__((__noreturn__
))
3151 (*__terminate_func
)();
3154 __terminate_func_ptr
3155 __terminate_set_func (__terminate_func_ptr newfunc
)
3157 __terminate_func_ptr oldfunc
= __terminate_func
;
3159 __terminate_func
= newfunc
;
3164 __throw_type_match (void *catch_type
, void *throw_type
, void *obj
)
3167 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3168 catch_type
, throw_type
);
3170 if (strcmp ((const char *)catch_type
, (const char *)throw_type
) == 0)
3181 /* Include definitions of EH context and table layout */
3183 #include "eh-common.h"
3184 #ifndef inhibit_libc
3188 /* Allocate and return a new EH context structure. */
3192 new_eh_context (void)
3194 struct eh_full_context
{
3195 struct eh_context c
;
3197 } *ehfc
= (struct eh_full_context
*) malloc (sizeof *ehfc
);
3202 memset (ehfc
, 0, sizeof *ehfc
);
3204 ehfc
->c
.dynamic_handler_chain
= (void **) ehfc
->top_elt
;
3206 /* This should optimize out entirely. This should always be true,
3207 but just in case it ever isn't, don't allow bogus code to be
3210 if ((void*)(&ehfc
->c
) != (void*)ehfc
)
3216 static __gthread_key_t eh_context_key
;
3218 /* Destructor for struct eh_context. */
3220 eh_context_free (void *ptr
)
3222 __gthread_key_dtor (eh_context_key
, ptr
);
3228 /* Pointer to function to return EH context. */
3230 static struct eh_context
*eh_context_initialize (void);
3231 static struct eh_context
*eh_context_static (void);
3233 static struct eh_context
*eh_context_specific (void);
3236 static struct eh_context
*(*get_eh_context
) (void) = &eh_context_initialize
;
3238 /* Routine to get EH context.
3239 This one will simply call the function pointer. */
3242 __get_eh_context (void)
3244 return (void *) (*get_eh_context
) ();
3247 /* Get and set the language specific info pointer. */
3250 __get_eh_info (void)
3252 struct eh_context
*eh
= (*get_eh_context
) ();
3256 #ifdef DWARF2_UNWIND_INFO
3257 static int dwarf_reg_size_table_initialized
= 0;
3258 static char dwarf_reg_size_table
[DWARF_FRAME_REGISTERS
];
3261 init_reg_size_table (void)
3263 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table
);
3264 dwarf_reg_size_table_initialized
= 1;
3270 eh_threads_initialize (void)
3272 /* Try to create the key. If it fails, revert to static method,
3273 otherwise start using thread specific EH contexts. */
3274 if (__gthread_key_create (&eh_context_key
, &eh_context_free
) == 0)
3275 get_eh_context
= &eh_context_specific
;
3277 get_eh_context
= &eh_context_static
;
3279 #endif /* no __GTHREADS */
3281 /* Initialize EH context.
3282 This will be called only once, since we change GET_EH_CONTEXT
3283 pointer to another routine. */
3285 static struct eh_context
*
3286 eh_context_initialize (void)
3290 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
3291 /* Make sure that get_eh_context does not point to us anymore.
3292 Some systems have dummy thread routines in their libc that
3293 return a success (Solaris 2.6 for example). */
3294 if (__gthread_once (&once
, eh_threads_initialize
) != 0
3295 || get_eh_context
== &eh_context_initialize
)
3297 /* Use static version of EH context. */
3298 get_eh_context
= &eh_context_static
;
3300 #ifdef DWARF2_UNWIND_INFO
3302 static __gthread_once_t once_regsizes
= __GTHREAD_ONCE_INIT
;
3303 if (__gthread_once (&once_regsizes
, init_reg_size_table
) != 0
3304 || ! dwarf_reg_size_table_initialized
)
3305 init_reg_size_table ();
3309 #else /* no __GTHREADS */
3311 /* Use static version of EH context. */
3312 get_eh_context
= &eh_context_static
;
3314 #ifdef DWARF2_UNWIND_INFO
3315 init_reg_size_table ();
3318 #endif /* no __GTHREADS */
3320 return (*get_eh_context
) ();
3323 /* Return a static EH context. */
3325 static struct eh_context
*
3326 eh_context_static (void)
3328 static struct eh_context eh
;
3329 static int initialized
;
3330 static void *top_elt
[2];
3335 memset (&eh
, 0, sizeof eh
);
3336 eh
.dynamic_handler_chain
= top_elt
;
3342 /* Return a thread specific EH context. */
3344 static struct eh_context
*
3345 eh_context_specific (void)
3347 struct eh_context
*eh
;
3348 eh
= (struct eh_context
*) __gthread_getspecific (eh_context_key
);
3351 eh
= new_eh_context ();
3352 if (__gthread_setspecific (eh_context_key
, (void *) eh
) != 0)
3358 #endif /* __GTHREADS */
3360 /* Support routines for alloc/free during exception handling */
3362 /* __eh_alloc and __eh_free attempt allocation using malloc, but fall back to
3363 the small arena in the eh_context. This is needed because throwing an
3364 out-of-memory exception would fail otherwise. The emergency space is
3365 allocated in blocks of size EH_ALLOC_ALIGN, the
3366 minimum allocation being two blocks. A bitmask indicates which blocks
3367 have been allocated. To indicate the size of an allocation, the bit for
3368 the final block is not set. Hence each allocation is a run of 1s followed
3371 __eh_alloc (size_t size
)
3380 struct eh_context
*eh
= __get_eh_context ();
3381 unsigned blocks
= (size
+ EH_ALLOC_ALIGN
- 1) / EH_ALLOC_ALIGN
;
3382 unsigned real_mask
= eh
->alloc_mask
| (eh
->alloc_mask
<< 1);
3386 if (blocks
> EH_ALLOC_SIZE
/ EH_ALLOC_ALIGN
)
3388 blocks
+= blocks
== 1;
3389 our_mask
= (1 << blocks
) - 1;
3391 for (ix
= EH_ALLOC_SIZE
/ EH_ALLOC_ALIGN
- blocks
; ix
; ix
--)
3392 if (! ((real_mask
>> ix
) & our_mask
))
3394 /* found some space */
3395 p
= &eh
->alloc_buffer
[ix
* EH_ALLOC_ALIGN
];
3396 eh
->alloc_mask
|= (our_mask
>> 1) << ix
;
3404 /* Free the memory for an cp_eh_info and associated exception, given
3405 a pointer to the cp_eh_info. */
3409 struct eh_context
*eh
= __get_eh_context ();
3411 ptrdiff_t diff
= (char *)p
- &eh
->alloc_buffer
[0];
3412 if (diff
>= 0 && diff
< EH_ALLOC_SIZE
)
3414 unsigned mask
= eh
->alloc_mask
;
3415 unsigned bit
= 1 << (diff
/ EH_ALLOC_ALIGN
);
3423 eh
->alloc_mask
= mask
;
3429 /* Support routines for setjmp/longjmp exception handling. */
3431 /* Calls to __sjthrow are generated by the compiler when an exception
3432 is raised when using the setjmp/longjmp exception handling codegen
3435 #ifdef DONT_USE_BUILTIN_SETJMP
3436 extern void longjmp (void *, int);
3439 /* Routine to get the head of the current thread's dynamic handler chain
3440 use for exception handling. */
3443 __get_dynamic_handler_chain (void)
3445 struct eh_context
*eh
= (*get_eh_context
) ();
3446 return &eh
->dynamic_handler_chain
;
3449 /* This is used to throw an exception when the setjmp/longjmp codegen
3450 method is used for exception handling.
3452 We call __terminate if there are no handlers left. Otherwise we run the
3453 cleanup actions off the dynamic cleanup stack, and pop the top of the
3454 dynamic handler chain, and use longjmp to transfer back to the associated
3460 struct eh_context
*eh
= (*get_eh_context
) ();
3461 void ***dhc
= &eh
->dynamic_handler_chain
;
3463 void (*func
)(void *, int);
3465 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3466 void ***cleanup
= (void***)&(*dhc
)[1];
3468 /* If there are any cleanups in the chain, run them now. */
3472 void **buf
= (void**)store
;
3477 #ifdef DONT_USE_BUILTIN_SETJMP
3478 if (! setjmp (&buf
[2]))
3480 if (! __builtin_setjmp (&buf
[2]))
3486 func
= (void(*)(void*, int))cleanup
[0][1];
3487 arg
= (void*)cleanup
[0][2];
3489 /* Update this before running the cleanup. */
3490 cleanup
[0] = (void **)cleanup
[0][0];
3503 /* We must call terminate if we try and rethrow an exception, when
3504 there is no exception currently active and when there are no
3506 if (! eh
->info
|| (*dhc
)[0] == 0)
3509 /* Find the jmpbuf associated with the top element of the dynamic
3510 handler chain. The jumpbuf starts two words into the buffer. */
3511 jmpbuf
= &(*dhc
)[2];
3513 /* Then we pop the top element off the dynamic handler chain. */
3514 *dhc
= (void**)(*dhc
)[0];
3516 /* And then we jump to the handler. */
3518 #ifdef DONT_USE_BUILTIN_SETJMP
3519 longjmp (jmpbuf
, 1);
3521 __builtin_longjmp (jmpbuf
, 1);
3525 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3526 handler, then pop the handler off the dynamic handler stack, and
3527 then throw. This is used to skip the first handler, and transfer
3528 control to the next handler in the dynamic handler stack. */
3531 __sjpopnthrow (void)
3533 struct eh_context
*eh
= (*get_eh_context
) ();
3534 void ***dhc
= &eh
->dynamic_handler_chain
;
3535 void (*func
)(void *, int);
3537 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3538 void ***cleanup
= (void***)&(*dhc
)[1];
3540 /* If there are any cleanups in the chain, run them now. */
3544 void **buf
= (void**)store
;
3549 #ifdef DONT_USE_BUILTIN_SETJMP
3550 if (! setjmp (&buf
[2]))
3552 if (! __builtin_setjmp (&buf
[2]))
3558 func
= (void(*)(void*, int))cleanup
[0][1];
3559 arg
= (void*)cleanup
[0][2];
3561 /* Update this before running the cleanup. */
3562 cleanup
[0] = (void **)cleanup
[0][0];
3575 /* Then we pop the top element off the dynamic handler chain. */
3576 *dhc
= (void**)(*dhc
)[0];
3581 /* Support code for all exception region-based exception handling. */
3584 __eh_rtime_match (void *rtime
)
3587 __eh_matcher matcher
;
3590 info
= *(__get_eh_info ());
3591 matcher
= ((__eh_info
*)info
)->match_function
;
3594 #ifndef inhibit_libc
3595 fprintf (stderr
, "Internal Compiler Bug: No runtime type matcher.");
3599 ret
= (*matcher
) (info
, rtime
, (void *)0);
3600 return (ret
!= NULL
);
3603 /* This value identifies the place from which an exception is being
3606 #ifdef EH_TABLE_LOOKUP
3612 #ifdef DWARF2_UNWIND_INFO
3614 /* Return the table version of an exception descriptor */
3617 __get_eh_table_version (exception_descriptor
*table
)
3619 return table
->lang
.version
;
3622 /* Return the originating table language of an exception descriptor */
3625 __get_eh_table_language (exception_descriptor
*table
)
3627 return table
->lang
.language
;
3630 /* This routine takes a PC and a pointer to the exception region TABLE for
3631 its translation unit, and returns the address of the exception handler
3632 associated with the closest exception table handler entry associated
3633 with that PC, or 0 if there are no table entries the PC fits in.
3635 In the advent of a tie, we have to give the last entry, as it represents
3639 old_find_exception_handler (void *pc
, old_exception_table
*table
)
3646 /* We can't do a binary search because the table isn't guaranteed
3647 to be sorted from function to function. */
3648 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
3650 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
3652 /* This can apply. Make sure it is at least as small as
3653 the previous best. */
3654 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
3655 && table
[pos
].start_region
>= table
[best
].start_region
))
3658 /* But it is sorted by starting PC within a function. */
3659 else if (best
>= 0 && table
[pos
].start_region
> pc
)
3663 return table
[best
].exception_handler
;
3669 /* find_exception_handler finds the correct handler, if there is one, to
3670 handle an exception.
3671 returns a pointer to the handler which controlled should be transferred
3672 to, or NULL if there is nothing left.
3674 PC - pc where the exception originates. If this is a rethrow,
3675 then this starts out as a pointer to the exception table
3676 entry we wish to rethrow out of.
3677 TABLE - exception table for the current module.
3678 EH_INFO - eh info pointer for this exception.
3679 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3680 CLEANUP - returned flag indicating whether this is a cleanup handler.
/* New-model handler lookup: walk TABLE's inner-most-first region table for
   the first entry covering PC, consult EH_INFO's match function when the
   entry carries match_info, and return the entry's handler.
   NOTE(review): extraction-damaged — the `if (rethrow)' guard around the
   re-throw setup, the matcher-result tests, the *cleanup assignments, the
   loop exits and the final return are elided from this view.  */
3683 find_exception_handler (void *pc
, exception_descriptor
*table
,
3684 __eh_info
*eh_info
, int rethrow
, int *cleanup
)
3687 void *retval
= NULL
;
3692 /* The new model assumed the table is sorted inner-most out so the
3693 first region we find which matches is the correct one */
3695 exception_table
*tab
= &(table
->table
[0]);
3697 /* Subtract 1 from the PC to avoid hitting the next region */
3700 /* pc is actually the region table entry to rethrow out of */
3701 pos
= ((exception_table
*) pc
) - tab
;
3702 pc
= ((exception_table
*) pc
)->end_region
- 1;
3704 /* The label is always on the LAST handler entry for a region,
3705 so we know the next entry is a different region, even if the
3706 addresses are the same. Make sure its not end of table tho. */
3707 if (tab
[pos
].start_region
!= (void *) -1)
3713 /* We can't do a binary search because the table is in inner-most
3714 to outermost address ranges within functions */
3715 for ( ; tab
[pos
].start_region
!= (void *) -1; pos
++)
3717 if (tab
[pos
].start_region
<= pc
&& tab
[pos
].end_region
> pc
)
3719 if (tab
[pos
].match_info
)
3721 __eh_matcher matcher
= eh_info
->match_function
;
3722 /* match info but no matcher is NOT a match */
/* The matcher decides whether this region's match_info accepts the
   exception described by eh_info.  */
3725 void *ret
= (*matcher
)((void *) eh_info
,
3726 tab
[pos
].match_info
, table
);
3730 retval
= tab
[pos
].exception_handler
;
3739 retval
= tab
[pos
].exception_handler
;
3746 #endif /* DWARF2_UNWIND_INFO */
3747 #endif /* EH_TABLE_LOOKUP */
3749 #ifdef DWARF2_UNWIND_INFO
3750 /* Support code for exception handling using static unwind information. */
3754 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3755 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3756 avoid a warning about casting between int and pointer of different
/* Pointer-width integer type (see the comment above: used so register words
   can be narrowed/widened to pointer size on ABIs like Irix 6 n32 where a
   void* is smaller than a word).  */
3759 typedef int ptr_type
__attribute__ ((mode (pointer
)));
/* NOTE(review): the line below is the interior of a struct (the register
   save-slot array, presumably of saved_regs_t); the struct's opening and
   closing lines are missing from this extraction.  */
3763 word_type
*reg
[DWARF_FRAME_REGISTERS
];
3766 #ifdef INCOMING_REGNO
3767 /* Is the saved value for register REG in frame UDATA stored in a register
3768 window in the previous frame? */
3770 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3771 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3772 compiled functions won't work with the frame-unwind stuff here.
3773 Perhaps the entireity of in_reg_window should be conditional on having
3774 seen a DW_CFA_GNU_window_save? */
3775 #define target_flags 0
/* NOTE(review): extraction-damaged — return type, braces, a fallback
   `return 0;' and the #else separating the two variants are elided.  */
3778 in_reg_window (int reg
, frame_state
*udata
)
3780 if (udata
->saved
[reg
] == REG_SAVED_REG
)
3781 return INCOMING_REGNO (reg
) == reg
;
3782 if (udata
->saved
[reg
] != REG_SAVED_OFFSET
)
/* REG_SAVED_OFFSET: the sign of the offset distinguishes a register-window
   save from an ordinary stack save, depending on stack direction.  */
3785 #ifdef STACK_GROWS_DOWNWARD
3786 return udata
->reg_or_offset
[reg
] > 0;
3788 return udata
->reg_or_offset
[reg
] < 0;
/* Stub variant for targets without INCOMING_REGNO (body elided here;
   parameters are explicitly unused).  */
3793 in_reg_window (int reg
__attribute__ ((__unused__
)),
3794 frame_state
*udata
__attribute__ ((__unused__
)))
3798 #endif /* INCOMING_REGNO */
3800 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3801 frame called by UDATA or 0. */
/* Return the address of the save slot holding register REG in frame UDATA,
   chasing REG_SAVED_REG indirections first.
   NOTE(review): extraction-damaged — return type, braces, the register-window
   lookup inside the in_reg_window branch, and the failure path after the
   final comment (presumably an abort) are elided from this view.  */
3804 get_reg_addr (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3806 while (udata
->saved
[reg
] == REG_SAVED_REG
)
3808 reg
= udata
->reg_or_offset
[reg
];
3809 if (in_reg_window (reg
, udata
))
3815 if (udata
->saved
[reg
] == REG_SAVED_OFFSET
)
/* Saved at a fixed offset from the frame's CFA.  */
3816 return (word_type
*)(udata
->cfa
+ udata
->reg_or_offset
[reg
]);
3818 /* We don't have a saved copy of this register. */
3822 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3823 frame called by UDATA or 0. */
3825 static inline void *
3826 get_reg (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3828 return (void *)(ptr_type
) *get_reg_addr (reg
, udata
, sub_udata
);
3831 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
3834 put_reg (unsigned reg
, void *val
, frame_state
*udata
)
3836 *get_reg_addr (reg
, udata
, NULL
) = (word_type
)(ptr_type
) val
;
3839 /* Copy the saved value for register REG from PTREG to frame
3840 TARGET_UDATA. Unlike the previous two functions, this can handle
3841 registers that are not one word large. */
3844 copy_reg (unsigned reg
, word_type
*preg
, frame_state
*target_udata
)
3846 word_type
*ptreg
= get_reg_addr (reg
, target_udata
, NULL
);
3847 memcpy (ptreg
, preg
, dwarf_reg_size_table
[reg
]);
3850 /* Retrieve the return address for frame UDATA. */
3852 static inline void *
3853 get_return_addr (frame_state
*udata
, frame_state
*sub_udata
)
3855 return __builtin_extract_return_addr
3856 (get_reg (udata
->retaddr_column
, udata
, sub_udata
));
3859 /* Overwrite the return address for frame UDATA with VAL. */
3862 put_return_addr (void *val
, frame_state
*udata
)
3864 val
= __builtin_frob_return_addr (val
);
3865 put_reg (udata
->retaddr_column
, val
, udata
);
3868 /* Given the current frame UDATA and its return address PC, return the
3869 information about the calling frame in CALLER_UDATA and update the
3870 register array in SAVED_REGS. */
/* NOTE(review): extraction-damaged — the return type, local declarations
   (`i', `p'), the failure check after __frame_state_for, the `if (p)' /
   `else' around the two CFA assignments, and braces are elided.  */
3873 next_stack_level (void *pc
, frame_state
*udata
, frame_state
*caller_udata
,
3874 saved_regs_t
*saved_regs
)
3879 /* Collect all of the registers for the current frame. */
3880 for (i
= 0; i
< DWARF_FRAME_REGISTERS
; i
++)
3881 if (udata
->saved
[i
])
3882 saved_regs
->reg
[i
] = get_reg_addr (i
, udata
, caller_udata
);
/* Decode the caller's unwind state from PC.  */
3884 caller_udata
= __frame_state_for (pc
, caller_udata
);
3888 /* Now go back to our caller's stack frame. If our caller's CFA was
3889 saved in a register in this stack frame or a previous one, restore it;
3890 otherwise, assume CFA register is SP and restore it to our CFA value
3891 (which is defined to be the value of SP in the caller's frame). */
3893 p
= saved_regs
->reg
[caller_udata
->cfa_reg
];
3895 caller_udata
->cfa
= (void *)(ptr_type
)*p
;
3897 caller_udata
->cfa
= udata
->cfa
;
/* An indirect CFA is fetched through memory at base_offset.  */
3899 if (caller_udata
->indirect
)
3900 caller_udata
->cfa
= * (void **) ((unsigned char *)caller_udata
->cfa
3901 + caller_udata
->base_offset
);
3902 caller_udata
->cfa
+= caller_udata
->cfa_offset
;
3904 return caller_udata
;
3907 /* Hook to call before __terminate if only cleanup handlers remain. */
/* NOTE(review): return type and body elided by extraction; per the
   throw_helper commentary below this is presumably an empty hook that a
   debugger can breakpoint — confirm against the full source.  */
3909 __unwinding_cleanup (void)
3913 /* throw_helper performs some of the common grunt work for a throw. This
3914 routine is called by throw and rethrows. This is pretty much split
3915 out from the old __throw routine. An addition has been added which allows
3916 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3917 but cleanups remaining. This allows a debugger to examine the state
3918 at which the throw was executed, before any cleanups, rather than
3919 at the terminate point after the stack has been unwound.
3921 EH is the current eh_context structure.
3922 PC is the address of the call to __throw.
3923 MY_UDATA is the unwind information for __throw.
3924 OFFSET_P is where we return the SP adjustment offset. */
/* NOTE(review): extraction-damaged — return type, braces, the unwind loop
   construct itself, several conditionals (rethrow guard, new/old EH model
   dispatch, cleanup bookkeeping, terminate on unhandled exception) and the
   final return are elided from this view; only statement fragments remain.  */
3927 throw_helper (struct eh_context
*eh
, void *pc
, frame_state
*my_udata
,
3930 frame_state ustruct2
, *udata
= &ustruct2
;
3931 frame_state ustruct
;
3932 frame_state
*sub_udata
= &ustruct
;
3933 void *saved_pc
= pc
;
3935 void *handler_p
= 0;
3937 void *callee_cfa
= 0;
3938 frame_state saved_ustruct
;
3941 int only_cleanup
= 0;
3943 int saved_state
= 0;
3945 saved_regs_t saved_regs
, cleanup_regs
;
3946 __eh_info
*eh_info
= (__eh_info
*)eh
->info
;
/* Clear the register-slot table and mark all of sub_udata unsaved.  */
3949 memset (saved_regs
.reg
, 0, sizeof saved_regs
.reg
);
3950 memset (sub_udata
->saved
, REG_UNSAVED
, sizeof sub_udata
->saved
);
3952 /* Do we find a handler based on a re-throw PC? */
3953 if (eh
->table_index
!= (void *) 0)
3956 memcpy (udata
, my_udata
, sizeof (*udata
));
3958 handler
= (void *) 0;
3961 frame_state
*p
= udata
;
/* Step outward one frame per iteration of the (elided) unwind loop.  */
3963 udata
= next_stack_level (pc
, udata
, sub_udata
, &saved_regs
);
3966 /* If we couldn't find the next frame, we lose. */
3970 if (udata
->eh_ptr
== NULL
)
/* Distinguish new-model descriptors by their runtime id field.  */
3973 new_eh_model
= (((exception_descriptor
*)(udata
->eh_ptr
))->
3974 runtime_id_field
== NEW_EH_RUNTIME
);
3979 handler
= find_exception_handler (eh
->table_index
, udata
->eh_ptr
,
3980 eh_info
, 1, &cleanup
);
3981 eh
->table_index
= (void *)0;
3985 handler
= find_exception_handler (pc
, udata
->eh_ptr
, eh_info
,
3988 handler
= old_find_exception_handler (pc
, udata
->eh_ptr
);
3990 /* If we found one, we can stop searching, if its not a cleanup.
3991 for cleanups, we save the state, and keep looking. This allows
3992 us to call a debug hook if there are nothing but cleanups left. */
3995 /* sub_udata now refers to the frame called by the handler frame. */
4001 saved_ustruct
= *udata
;
4002 cleanup_regs
= saved_regs
;
4003 handler_p
= handler
;
4007 /* Save the CFA of the frame called by the handler
4009 callee_cfa
= sub_udata
->cfa
;
4016 callee_cfa
= sub_udata
->cfa
;
4021 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
4022 hitting the beginning of the next region. */
4023 pc
= get_return_addr (udata
, sub_udata
) - 1;
/* Restore the cleanup-handler state saved above before unwinding to it.  */
4028 udata
= &saved_ustruct
;
4029 saved_regs
= cleanup_regs
;
4030 handler
= handler_p
;
4033 __unwinding_cleanup ();
4036 /* If we haven't found a handler by now, this is an unhandled
4041 eh
->handler_label
= handler
;
4043 args_size
= udata
->args_size
;
4045 /* We adjust SP by the difference between __throw's CFA and the CFA for
4046 the frame called by the handler frame, because those CFAs correspond
4047 to the SP values at the two call sites. We need to further adjust by
4048 the args_size of the handler frame itself to get the handler frame's
4049 SP from before the args were pushed for that call. */
4050 #ifdef STACK_GROWS_DOWNWARD
4051 *offset_p
= callee_cfa
- my_udata
->cfa
+ args_size
;
4053 *offset_p
= my_udata
->cfa
- callee_cfa
- args_size
;
4056 /* If we found a handler in the throw context there's no need to
4060 /* Copy saved register values into our register save slots. */
4061 for (i
= 0; i
< DWARF_FRAME_REGISTERS
; i
++)
4062 if (i
!= udata
->retaddr_column
&& saved_regs
.reg
[i
])
4063 copy_reg (i
, saved_regs
.reg
[i
], my_udata
);
4070 /* We first search for an exception handler, and if we don't find
4071 it, we call __terminate on the current stack frame so that we may
4072 use the debugger to walk the stack and understand why no handler
4075 If we find one, then we unwind the frames down to the one that
4076 has the handler and transfer control into the handler. */
4078 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
/* NOTE(review): the definition line of __throw itself (between the extern
   comment above and the first statement below) was dropped by extraction,
   along with local declarations, the rethrow-without-exception terminate
   check, the `label:' the &&label below refers to, and the epilogue.  */
4083 struct eh_context
*eh
= (*get_eh_context
) ();
4087 /* XXX maybe make my_ustruct static so we don't have to look it up for
4089 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
4091 /* This is required for C++ semantics. We must call terminate if we
4092 try and rethrow an exception, when there is no exception currently
4097 /* Start at our stack frame. */
4099 my_udata
= __frame_state_for (&&label
, my_udata
);
4103 /* We need to get the value from the CFA register. */
4104 my_udata
->cfa
= __builtin_dwarf_cfa ();
4106 /* Do any necessary initialization to access arbitrary stack frames.
4107 On the SPARC, this means flushing the register windows. */
4108 __builtin_unwind_init ();
4110 /* Now reset pc to the right throw point. The return address points to
4111 the instruction after the call to __throw; we subtract 1 so that pc
4112 points into the call insn itself. Since we work with PC ranges (as
4113 opposed to specific call sites), it isn't important for it to point to
4114 the very beginning of the call insn, and making it do so would be
4115 hard on targets with variable length call insns. */
4116 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
/* Find the handler and the SP adjustment, then jump there.  */
4118 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
4122 __builtin_eh_return ((void *)eh
, offset
, handler
);
4124 /* Epilogue: restore the handler frame's register values and return
4128 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
/* Re-throw out of the exception region identified by INDEX.  Mirrors
   __throw above, but records INDEX in eh->table_index so throw_helper
   resumes the handler search past that table entry.
   NOTE(review): extraction-damaged — the return-type line, local
   declarations, the terminate-on-no-exception check, the `label:'
   referenced below, and the epilogue are elided.  */
4131 __rethrow (void *index
)
4133 struct eh_context
*eh
= (*get_eh_context
) ();
4137 /* XXX maybe make my_ustruct static so we don't have to look it up for
4139 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
4141 /* This is required for C++ semantics. We must call terminate if we
4142 try and rethrow an exception, when there is no exception currently
4147 /* This is the table index we want to rethrow from. The value of
4148 the END_REGION label is used for the PC of the throw, and the
4149 search begins with the next table entry. */
4150 eh
->table_index
= index
;
4152 /* Start at our stack frame. */
4154 my_udata
= __frame_state_for (&&label
, my_udata
);
4158 /* We need to get the value from the CFA register. */
4159 my_udata
->cfa
= __builtin_dwarf_cfa ();
4161 /* Do any necessary initialization to access arbitrary stack frames.
4162 On the SPARC, this means flushing the register windows. */
4163 __builtin_unwind_init ();
4165 /* Now reset pc to the right throw point. The return address points to
4166 the instruction after the call to __throw; we subtract 1 so that pc
4167 points into the call insn itself. Since we work with PC ranges (as
4168 opposed to specific call sites), it isn't important for it to point to
4169 the very beginning of the call insn, and making it do so would be
4170 hard on targets with variable length call insns. */
4171 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
4173 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
4177 __builtin_eh_return ((void *)eh
, offset
, handler
);
4179 /* Epilogue: restore the handler frame's register values and return
4182 #endif /* DWARF2_UNWIND_INFO */
4184 #ifdef IA64_UNWIND_INFO
4187 /* Return handler to which we want to transfer control, NULL if we don't
4188 intend to handle this exception here. */
/* NOTE(review): same smallest-enclosing-region scan as
   old_find_exception_handler above; as there, the declarations of
   `pos'/`best', the `best = pos;' assignment, the early loop exit and the
   NULL return path are elided by extraction.  */
4190 __ia64_personality_v1 (void *pc
, old_exception_table
*table
)
4197 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
4199 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
4201 /* This can apply. Make sure it is at least as small as
4202 the previous best. */
4203 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
4204 && table
[pos
].start_region
>= table
[best
].start_region
))
4207 /* It is sorted by starting PC within a function. */
4208 else if (best
>= 0 && table
[pos
].start_region
> pc
)
4212 return table
[best
].exception_handler
;
/* IA64 analogue of throw_helper: walk backwards through the register-stack
   frames from __throw's own frame, asking each frame's personality routine
   for a handler, then copy saved register state into THROW_FRAME so the
   eh_return in __throw lands in the handler's context.
   NOTE(review): extraction-damaged — return type, braces, the declarations
   of `bsp'/`pc_base'/`frame_count'/`eh_table', the unwind loop construct,
   the uncaught/terminate path and the trailing argument lists of several
   calls are elided from this view.  */
4218 ia64_throw_helper (ia64_frame_state
*throw_frame
, ia64_frame_state
*caller
,
4219 void *throw_bsp
, void *throw_sp
)
4221 void *throw_pc
= __builtin_return_address (0);
4222 unwind_info_ptr
*info
;
4223 void *pc
, *handler
= NULL
;
4228 __builtin_ia64_flushrs (); /* Make the local register stacks available. */
4230 /* Start at our stack frame, get our state. */
4231 __build_ia64_frame_state (throw_pc
, throw_frame
, throw_bsp
, throw_sp
,
4234 /* Now we have to find the proper frame for pc, and see if there
4235 is a handler for it. if not, we keep going back frames until
4236 we do find one. Otherwise we call uncaught (). */
4239 memcpy (caller
, throw_frame
, sizeof (*caller
));
4242 void *(*personality
) (void *, old_exception_table
*);
4246 /* We only care about the RP right now, so we dont need to keep
4247 any other information about a call frame right now. */
4248 pc
= __get_real_reg_value (&caller
->rp
) - 1;
4249 bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&caller
->pfs
),
4251 info
= __build_ia64_frame_state (pc
, caller
, bsp
, caller
->my_psp
,
4254 /* If we couldn't find the next frame, we lose. */
4258 personality
= __get_personality (info
);
4259 /* TODO Haven't figured out how to actually load the personality address
4260 yet, so just always default to the one we expect for now. */
/* NOTE(review): per the TODO above, any recorded personality is overridden
   with the default v1 routine whenever one is present.  */
4261 if (personality
!= 0)
4262 personality
= __ia64_personality_v1
;
4263 eh_table
= __get_except_table (info
);
4264 /* If there is no personality routine, we'll keep unwinding. */
4266 /* Pass a segment relative PC address to the personality routine,
4267 because the unwind_info section uses segrel relocs. */
4268 handler
= personality ((void *)(pc
- pc_base
), eh_table
);
4274 /* Handler is a segment relative address, so we must adjust it here. */
4275 handler
+= (long) pc_base
;
4277 /* If we found a handler, we need to unwind the stack to that point.
4278 We do this by copying saved values from previous frames into the
4279 save slot for the throw_frame saved slots. when __throw returns,
4280 it'll pickup the correct values. */
4282 /* Start with where __throw saved things, and copy each saved register
4283 of each previous frame until we get to the one before we're
4284 throwing back to. */
4285 memcpy (caller
, throw_frame
, sizeof (*caller
));
4286 for ( ; frame_count
> 0; frame_count
--)
4288 pc
= __get_real_reg_value (&caller
->rp
) - 1;
4289 bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&caller
->pfs
),
4291 __build_ia64_frame_state (pc
, caller
, bsp
, caller
->my_psp
, &pc_base
);
4292 /* Any regs that were saved can be put in the throw frame now. */
4293 /* We don't want to copy any saved register from the
4294 target destination, but we do want to load up it's frame. */
4295 if (frame_count
> 1)
4296 __copy_saved_reg_state (throw_frame
, caller
);
4299 /* Set return address of the throw frame to the handler. */
4300 __set_real_reg_value (&throw_frame
->rp
, handler
);
4302 /* TODO, do we need to do anything to make the values we wrote 'stick'? */
4303 /* DO we need to go through the whole loadrs seqeunce? */
/* NOTE(review): body of the IA64 __throw; its definition line and the
   comment header preceding it, the terminate-on-no-exception check, and
   the trailing argument of the __calc_caller_bsp call are elided by
   extraction.  It captures SP/BSP, delegates the unwind to
   ia64_throw_helper, then adjusts BSP and performs the eh_return.  */
4310 register void *stack_pointer
__asm__("r12");
4311 struct eh_context
*eh
= (*get_eh_context
) ();
4312 ia64_frame_state my_frame
;
4313 ia64_frame_state originator
; /* For the context handler is in. */
4314 void *bsp
, *tmp_bsp
;
4317 /* This is required for C++ semantics. We must call terminate if we
4318 try and rethrow an exception, when there is no exception currently
4323 __builtin_unwind_init ();
4325 /* We have to call another routine to actually process the frame
4326 information, which will force all of __throw's local registers into
4329 /* Get the value of ar.bsp while we're here. */
4331 bsp
= __builtin_ia64_bsp ();
4332 ia64_throw_helper (&my_frame
, &originator
, bsp
, stack_pointer
);
4334 /* Now we have to fudge the bsp by the amount in our (__throw)
4335 frame marker, since the return is going to adjust it by that much. */
4337 tmp_bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&my_frame
.pfs
),
4339 offset
= (char *)my_frame
.my_bsp
- (char *)tmp_bsp
;
4340 tmp_bsp
= (char *)originator
.my_bsp
+ offset
;
4342 __builtin_eh_return (tmp_bsp
, offset
, originator
.my_sp
);
4344 /* The return address was already set by throw_helper. */
4347 #endif /* IA64_UNWIND_INFO */