1 /* More subroutines needed by GCC output code on some machines. */
2 /* Compile this one with gcc. */
3 /* Copyright (C) 1989, 92, 93, 94, 95, 96, 97, 98, 1999, 2000
4 Free Software Foundation, Inc.
6 This file is part of GNU CC.
8 GNU CC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
22 GNU CC is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 GNU General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with GNU CC; see the file COPYING. If not, write to
29 the Free Software Foundation, 59 Temple Place - Suite 330,
30 Boston, MA 02111-1307, USA. */
32 /* It is incorrect to include config.h here, because this file is being
33 compiled for the target, and hence definitions concerning only the host
42 /* Don't use `fancy_abort' here even if config.h says to use it. */
49 #if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
50 #if defined (L_divdi3) || defined (L_moddi3)
62 w
.s
.high
= -uu
.s
.high
- ((UWtype
) w
.s
.low
> 0);
68 /* Unless shift functions are defined whith full ANSI prototypes,
69 parameter b will be promoted to int if word_type is smaller than an int. */
72 __lshrdi3 (DWtype u
, word_type b
)
83 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
87 w
.s
.low
= (UWtype
) uu
.s
.high
>> -bm
;
91 UWtype carries
= (UWtype
) uu
.s
.high
<< bm
;
93 w
.s
.high
= (UWtype
) uu
.s
.high
>> b
;
94 w
.s
.low
= ((UWtype
) uu
.s
.low
>> b
) | carries
;
103 __ashldi3 (DWtype u
, word_type b
)
114 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
118 w
.s
.high
= (UWtype
) uu
.s
.low
<< -bm
;
122 UWtype carries
= (UWtype
) uu
.s
.low
>> bm
;
124 w
.s
.low
= (UWtype
) uu
.s
.low
<< b
;
125 w
.s
.high
= ((UWtype
) uu
.s
.high
<< b
) | carries
;
134 __ashrdi3 (DWtype u
, word_type b
)
145 bm
= (sizeof (Wtype
) * BITS_PER_UNIT
) - b
;
148 /* w.s.high = 1..1 or 0..0 */
149 w
.s
.high
= uu
.s
.high
>> (sizeof (Wtype
) * BITS_PER_UNIT
- 1);
150 w
.s
.low
= uu
.s
.high
>> -bm
;
154 UWtype carries
= (UWtype
) uu
.s
.high
<< bm
;
156 w
.s
.high
= uu
.s
.high
>> b
;
157 w
.s
.low
= ((UWtype
) uu
.s
.low
>> b
) | carries
;
171 w
.s
.low
= ffs (uu
.s
.low
);
174 w
.s
.low
= ffs (uu
.s
.high
);
177 w
.s
.low
+= BITS_PER_UNIT
* sizeof (Wtype
);
186 __muldi3 (DWtype u
, DWtype v
)
194 w
.ll
= __umulsidi3 (uu
.s
.low
, vv
.s
.low
);
195 w
.s
.high
+= ((UWtype
) uu
.s
.low
* (UWtype
) vv
.s
.high
196 + (UWtype
) uu
.s
.high
* (UWtype
) vv
.s
.low
);
203 #if defined (sdiv_qrnnd)
205 __udiv_w_sdiv (UWtype
*rp
, UWtype a1
, UWtype a0
, UWtype d
)
212 if (a1
< d
- a1
- (a0
>> (W_TYPE_SIZE
- 1)))
214 /* dividend, divisor, and quotient are nonnegative */
215 sdiv_qrnnd (q
, r
, a1
, a0
, d
);
219 /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
220 sub_ddmmss (c1
, c0
, a1
, a0
, d
>> 1, d
<< (W_TYPE_SIZE
- 1));
221 /* Divide (c1*2^32 + c0) by d */
222 sdiv_qrnnd (q
, r
, c1
, c0
, d
);
223 /* Add 2^31 to quotient */
224 q
+= (UWtype
) 1 << (W_TYPE_SIZE
- 1);
229 b1
= d
>> 1; /* d/2, between 2^30 and 2^31 - 1 */
230 c1
= a1
>> 1; /* A/2 */
231 c0
= (a1
<< (W_TYPE_SIZE
- 1)) + (a0
>> 1);
233 if (a1
< b1
) /* A < 2^32*b1, so A/2 < 2^31*b1 */
235 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
237 r
= 2*r
+ (a0
& 1); /* Remainder from A/(2*b1) */
254 else if (c1
< b1
) /* So 2^31 <= (A/2)/b1 < 2^32 */
257 c0
= ~c0
; /* logical NOT */
259 sdiv_qrnnd (q
, r
, c1
, c0
, b1
); /* (A/2) / (d/2) */
261 q
= ~q
; /* (A/2)/b1 */
264 r
= 2*r
+ (a0
& 1); /* A/(2*b1) */
282 else /* Implies c1 = b1 */
283 { /* Hence a1 = d - 1 = 2*b1 - 1 */
301 /* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
303 __udiv_w_sdiv (UWtype
*rp
__attribute__ ((__unused__
)),
304 UWtype a1
__attribute__ ((__unused__
)),
305 UWtype a0
__attribute__ ((__unused__
)),
306 UWtype d
__attribute__ ((__unused__
)))
313 #if (defined (L_udivdi3) || defined (L_divdi3) || \
314 defined (L_umoddi3) || defined (L_moddi3))
319 static const UQItype __clz_tab
[] =
321 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
322 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
323 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
324 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
325 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
326 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
327 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
328 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
331 #if (defined (L_udivdi3) || defined (L_divdi3) || \
332 defined (L_umoddi3) || defined (L_moddi3))
336 __udivmoddi4 (UDWtype n
, UDWtype d
, UDWtype
*rp
)
341 UWtype d0
, d1
, n0
, n1
, n2
;
353 #if !UDIV_NEEDS_NORMALIZATION
360 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
363 /* Remainder in n0. */
370 d0
= 1 / d0
; /* Divide intentionally by zero. */
372 udiv_qrnnd (q1
, n1
, 0, n1
, d0
);
373 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
375 /* Remainder in n0. */
386 #else /* UDIV_NEEDS_NORMALIZATION */
394 count_leading_zeros (bm
, d0
);
398 /* Normalize, i.e. make the most significant bit of the
402 n1
= (n1
<< bm
) | (n0
>> (W_TYPE_SIZE
- bm
));
406 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
409 /* Remainder in n0 >> bm. */
416 d0
= 1 / d0
; /* Divide intentionally by zero. */
418 count_leading_zeros (bm
, d0
);
422 /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
423 conclude (the most significant bit of n1 is set) /\ (the
424 leading quotient digit q1 = 1).
426 This special case is necessary, not an optimization.
427 (Shifts counts of W_TYPE_SIZE are undefined.) */
436 b
= W_TYPE_SIZE
- bm
;
440 n1
= (n1
<< bm
) | (n0
>> b
);
443 udiv_qrnnd (q1
, n1
, n2
, n1
, d0
);
448 udiv_qrnnd (q0
, n0
, n1
, n0
, d0
);
450 /* Remainder in n0 >> bm. */
460 #endif /* UDIV_NEEDS_NORMALIZATION */
471 /* Remainder in n1n0. */
483 count_leading_zeros (bm
, d1
);
486 /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
487 conclude (the most significant bit of n1 is set) /\ (the
488 quotient digit q0 = 0 or 1).
490 This special case is necessary, not an optimization. */
492 /* The condition on the next line takes advantage of that
493 n1 >= d1 (true due to program flow). */
494 if (n1
> d1
|| n0
>= d0
)
497 sub_ddmmss (n1
, n0
, n1
, n0
, d1
, d0
);
516 b
= W_TYPE_SIZE
- bm
;
518 d1
= (d1
<< bm
) | (d0
>> b
);
521 n1
= (n1
<< bm
) | (n0
>> b
);
524 udiv_qrnnd (q0
, n1
, n2
, n1
, d1
);
525 umul_ppmm (m1
, m0
, q0
, d0
);
527 if (m1
> n1
|| (m1
== n1
&& m0
> n0
))
530 sub_ddmmss (m1
, m0
, m1
, m0
, d1
, d0
);
535 /* Remainder in (n1n0 - m1m0) >> bm. */
538 sub_ddmmss (n1
, n0
, n1
, n0
, m1
, m0
);
539 rr
.s
.low
= (n1
<< b
) | (n0
>> bm
);
540 rr
.s
.high
= n1
>> bm
;
555 __divdi3 (DWtype u
, DWtype v
)
566 uu
.ll
= __negdi2 (uu
.ll
);
569 vv
.ll
= __negdi2 (vv
.ll
);
571 w
= __udivmoddi4 (uu
.ll
, vv
.ll
, (UDWtype
*) 0);
581 __moddi3 (DWtype u
, DWtype v
)
592 uu
.ll
= __negdi2 (uu
.ll
);
594 vv
.ll
= __negdi2 (vv
.ll
);
596 (void) __udivmoddi4 (uu
.ll
, vv
.ll
, &w
);
606 __umoddi3 (UDWtype u
, UDWtype v
)
610 (void) __udivmoddi4 (u
, v
, &w
);
618 __udivdi3 (UDWtype n
, UDWtype d
)
620 return __udivmoddi4 (n
, d
, (UDWtype
*) 0);
626 __cmpdi2 (DWtype a
, DWtype b
)
630 au
.ll
= a
, bu
.ll
= b
;
632 if (au
.s
.high
< bu
.s
.high
)
634 else if (au
.s
.high
> bu
.s
.high
)
636 if ((UWtype
) au
.s
.low
< (UWtype
) bu
.s
.low
)
638 else if ((UWtype
) au
.s
.low
> (UWtype
) bu
.s
.low
)
646 __ucmpdi2 (DWtype a
, DWtype b
)
650 au
.ll
= a
, bu
.ll
= b
;
652 if ((UWtype
) au
.s
.high
< (UWtype
) bu
.s
.high
)
654 else if ((UWtype
) au
.s
.high
> (UWtype
) bu
.s
.high
)
656 if ((UWtype
) au
.s
.low
< (UWtype
) bu
.s
.low
)
658 else if ((UWtype
) au
.s
.low
> (UWtype
) bu
.s
.low
)
664 #if defined(L_fixunstfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
665 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
666 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
669 __fixunstfDI (TFtype a
)
677 /* Compute high word of result, as a flonum. */
678 b
= (a
/ HIGH_WORD_COEFF
);
679 /* Convert that to fixed (but not to DWtype!),
680 and shift it into the high word. */
683 /* Remove high part from the TFtype, leaving the low part as flonum. */
685 /* Convert that to fixed (but not to DWtype!) and add it in.
686 Sometimes A comes out negative. This is significant, since
687 A has more bits than a long int does. */
696 #if defined(L_fixtfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
701 return - __fixunstfDI (-a
);
702 return __fixunstfDI (a
);
706 #if defined(L_fixunsxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
707 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
708 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
711 __fixunsxfDI (XFtype a
)
719 /* Compute high word of result, as a flonum. */
720 b
= (a
/ HIGH_WORD_COEFF
);
721 /* Convert that to fixed (but not to DWtype!),
722 and shift it into the high word. */
725 /* Remove high part from the XFtype, leaving the low part as flonum. */
727 /* Convert that to fixed (but not to DWtype!) and add it in.
728 Sometimes A comes out negative. This is significant, since
729 A has more bits than a long int does. */
738 #if defined(L_fixxfdi) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
743 return - __fixunsxfDI (-a
);
744 return __fixunsxfDI (a
);
749 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
750 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
753 __fixunsdfDI (DFtype a
)
761 /* Compute high word of result, as a flonum. */
762 b
= (a
/ HIGH_WORD_COEFF
);
763 /* Convert that to fixed (but not to DWtype!),
764 and shift it into the high word. */
767 /* Remove high part from the DFtype, leaving the low part as flonum. */
769 /* Convert that to fixed (but not to DWtype!) and add it in.
770 Sometimes A comes out negative. This is significant, since
771 A has more bits than a long int does. */
785 return - __fixunsdfDI (-a
);
786 return __fixunsdfDI (a
);
791 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
792 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
795 __fixunssfDI (SFtype original_a
)
797 /* Convert the SFtype to a DFtype, because that is surely not going
798 to lose any bits. Some day someone else can write a faster version
799 that avoids converting to DFtype, and verify it really works right. */
800 DFtype a
= original_a
;
807 /* Compute high word of result, as a flonum. */
808 b
= (a
/ HIGH_WORD_COEFF
);
809 /* Convert that to fixed (but not to DWtype!),
810 and shift it into the high word. */
813 /* Remove high part from the DFtype, leaving the low part as flonum. */
815 /* Convert that to fixed (but not to DWtype!) and add it in.
816 Sometimes A comes out negative. This is significant, since
817 A has more bits than a long int does. */
831 return - __fixunssfDI (-a
);
832 return __fixunssfDI (a
);
836 #if defined(L_floatdixf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96)
837 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
838 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
839 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
842 __floatdixf (DWtype u
)
846 d
= (Wtype
) (u
>> WORD_SIZE
);
847 d
*= HIGH_HALFWORD_COEFF
;
848 d
*= HIGH_HALFWORD_COEFF
;
849 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
855 #if defined(L_floatditf) && (LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 128)
856 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
857 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
858 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
861 __floatditf (DWtype u
)
865 d
= (Wtype
) (u
>> WORD_SIZE
);
866 d
*= HIGH_HALFWORD_COEFF
;
867 d
*= HIGH_HALFWORD_COEFF
;
868 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
875 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
876 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
877 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
880 __floatdidf (DWtype u
)
884 d
= (Wtype
) (u
>> WORD_SIZE
);
885 d
*= HIGH_HALFWORD_COEFF
;
886 d
*= HIGH_HALFWORD_COEFF
;
887 d
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
894 #define WORD_SIZE (sizeof (Wtype) * BITS_PER_UNIT)
895 #define HIGH_HALFWORD_COEFF (((UDWtype) 1) << (WORD_SIZE / 2))
896 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)
897 #define DI_SIZE (sizeof (DWtype) * BITS_PER_UNIT)
899 /* Define codes for all the float formats that we know of. Note
900 that this is copied from real.h. */
902 #define UNKNOWN_FLOAT_FORMAT 0
903 #define IEEE_FLOAT_FORMAT 1
904 #define VAX_FLOAT_FORMAT 2
905 #define IBM_FLOAT_FORMAT 3
907 /* Default to IEEE float if not specified. Nearly all machines use it. */
908 #ifndef HOST_FLOAT_FORMAT
909 #define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
912 #if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
917 #if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
922 #if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
928 __floatdisf (DWtype u
)
930 /* Do the calculation in DFmode
931 so that we don't lose any of the precision of the high word
932 while multiplying it. */
935 /* Protect against double-rounding error.
936 Represent any low-order bits, that might be truncated in DFmode,
937 by a bit that won't be lost. The bit can go in anywhere below the
938 rounding position of the SFmode. A fixed mask and bit position
939 handles all usual configurations. It doesn't handle the case
940 of 128-bit DImode, however. */
941 if (DF_SIZE
< DI_SIZE
942 && DF_SIZE
> (DI_SIZE
- DF_SIZE
+ SF_SIZE
))
944 #define REP_BIT ((UDWtype) 1 << (DI_SIZE - DF_SIZE))
945 if (! (- ((DWtype
) 1 << DF_SIZE
) < u
946 && u
< ((DWtype
) 1 << DF_SIZE
)))
948 if ((UDWtype
) u
& (REP_BIT
- 1))
952 f
= (Wtype
) (u
>> WORD_SIZE
);
953 f
*= HIGH_HALFWORD_COEFF
;
954 f
*= HIGH_HALFWORD_COEFF
;
955 f
+= (UWtype
) (u
& (HIGH_WORD_COEFF
- 1));
961 #if defined(L_fixunsxfsi) && LIBGCC2_LONG_DOUBLE_TYPE_SIZE == 96
962 /* Reenable the normal types, in case limits.h needs them. */
975 __fixunsxfSI (XFtype a
)
977 if (a
>= - (DFtype
) LONG_MIN
)
978 return (Wtype
) (a
+ LONG_MIN
) - LONG_MIN
;
984 /* Reenable the normal types, in case limits.h needs them. */
997 __fixunsdfSI (DFtype a
)
999 if (a
>= - (DFtype
) LONG_MIN
)
1000 return (Wtype
) (a
+ LONG_MIN
) - LONG_MIN
;
1006 /* Reenable the normal types, in case limits.h needs them. */
1019 __fixunssfSI (SFtype a
)
1021 if (a
>= - (SFtype
) LONG_MIN
)
1022 return (Wtype
) (a
+ LONG_MIN
) - LONG_MIN
;
1027 /* From here on down, the routines use normal data types. */
1029 #define SItype bogus_type
1030 #define USItype bogus_type
1031 #define DItype bogus_type
1032 #define UDItype bogus_type
1033 #define SFtype bogus_type
1034 #define DFtype bogus_type
1052 /* Like bcmp except the sign is meaningful.
1053 Result is negative if S1 is less than S2,
1054 positive if S1 is greater, 0 if S1 and S2 are equal. */
1057 __gcc_bcmp (const unsigned char *s1
, const unsigned char *s2
, size_t size
)
1061 unsigned char c1
= *s1
++, c2
= *s2
++;
1078 #if defined(__svr4__) || defined(__alliant__)
1082 /* The Alliant needs the added underscore. */
1083 asm (".globl __builtin_saveregs");
1084 asm ("__builtin_saveregs:");
1085 asm (".globl ___builtin_saveregs");
1086 asm ("___builtin_saveregs:");
1088 asm (" andnot 0x0f,%sp,%sp"); /* round down to 16-byte boundary */
1089 asm (" adds -96,%sp,%sp"); /* allocate stack space for reg save
1090 area and also for a new va_list
1092 /* Save all argument registers in the arg reg save area. The
1093 arg reg save area must have the following layout (according
1105 asm (" fst.q %f8, 0(%sp)"); /* save floating regs (f8-f15) */
1106 asm (" fst.q %f12,16(%sp)");
1108 asm (" st.l %r16,32(%sp)"); /* save integer regs (r16-r27) */
1109 asm (" st.l %r17,36(%sp)");
1110 asm (" st.l %r18,40(%sp)");
1111 asm (" st.l %r19,44(%sp)");
1112 asm (" st.l %r20,48(%sp)");
1113 asm (" st.l %r21,52(%sp)");
1114 asm (" st.l %r22,56(%sp)");
1115 asm (" st.l %r23,60(%sp)");
1116 asm (" st.l %r24,64(%sp)");
1117 asm (" st.l %r25,68(%sp)");
1118 asm (" st.l %r26,72(%sp)");
1119 asm (" st.l %r27,76(%sp)");
1121 asm (" adds 80,%sp,%r16"); /* compute the address of the new
1122 va_list structure. Put in into
1123 r16 so that it will be returned
1126 /* Initialize all fields of the new va_list structure. This
1127 structure looks like:
1130 unsigned long ireg_used;
1131 unsigned long freg_used;
1137 asm (" st.l %r0, 0(%r16)"); /* nfixed */
1138 asm (" st.l %r0, 4(%r16)"); /* nfloating */
1139 asm (" st.l %sp, 8(%r16)"); /* __va_ctl points to __va_struct. */
1140 asm (" bri %r1"); /* delayed return */
1141 asm (" st.l %r28,12(%r16)"); /* pointer to overflow args */
1143 #else /* not __svr4__ */
1144 #if defined(__PARAGON__)
1146 * we'll use SVR4-ish varargs but need SVR3.2 assembler syntax,
1147 * and we stand a better chance of hooking into libraries
1148 * compiled by PGI. [andyp@ssd.intel.com]
1152 asm (".globl __builtin_saveregs");
1153 asm ("__builtin_saveregs:");
1154 asm (".globl ___builtin_saveregs");
1155 asm ("___builtin_saveregs:");
1157 asm (" andnot 0x0f,sp,sp"); /* round down to 16-byte boundary */
1158 asm (" adds -96,sp,sp"); /* allocate stack space for reg save
1159 area and also for a new va_list
1161 /* Save all argument registers in the arg reg save area. The
1162 arg reg save area must have the following layout (according
1174 asm (" fst.q f8, 0(sp)");
1175 asm (" fst.q f12,16(sp)");
1176 asm (" st.l r16,32(sp)");
1177 asm (" st.l r17,36(sp)");
1178 asm (" st.l r18,40(sp)");
1179 asm (" st.l r19,44(sp)");
1180 asm (" st.l r20,48(sp)");
1181 asm (" st.l r21,52(sp)");
1182 asm (" st.l r22,56(sp)");
1183 asm (" st.l r23,60(sp)");
1184 asm (" st.l r24,64(sp)");
1185 asm (" st.l r25,68(sp)");
1186 asm (" st.l r26,72(sp)");
1187 asm (" st.l r27,76(sp)");
1189 asm (" adds 80,sp,r16"); /* compute the address of the new
1190 va_list structure. Put in into
1191 r16 so that it will be returned
1194 /* Initialize all fields of the new va_list structure. This
1195 structure looks like:
1198 unsigned long ireg_used;
1199 unsigned long freg_used;
1205 asm (" st.l r0, 0(r16)"); /* nfixed */
1206 asm (" st.l r0, 4(r16)"); /* nfloating */
1207 asm (" st.l sp, 8(r16)"); /* __va_ctl points to __va_struct. */
1208 asm (" bri r1"); /* delayed return */
1209 asm (" st.l r28,12(r16)"); /* pointer to overflow args */
1210 #else /* not __PARAGON__ */
1214 asm (".globl ___builtin_saveregs");
1215 asm ("___builtin_saveregs:");
1216 asm (" mov sp,r30");
1217 asm (" andnot 0x0f,sp,sp");
1218 asm (" adds -96,sp,sp"); /* allocate sufficient space on the stack */
1220 /* Fill in the __va_struct. */
1221 asm (" st.l r16, 0(sp)"); /* save integer regs (r16-r27) */
1222 asm (" st.l r17, 4(sp)"); /* int fixed[12] */
1223 asm (" st.l r18, 8(sp)");
1224 asm (" st.l r19,12(sp)");
1225 asm (" st.l r20,16(sp)");
1226 asm (" st.l r21,20(sp)");
1227 asm (" st.l r22,24(sp)");
1228 asm (" st.l r23,28(sp)");
1229 asm (" st.l r24,32(sp)");
1230 asm (" st.l r25,36(sp)");
1231 asm (" st.l r26,40(sp)");
1232 asm (" st.l r27,44(sp)");
1234 asm (" fst.q f8, 48(sp)"); /* save floating regs (f8-f15) */
1235 asm (" fst.q f12,64(sp)"); /* int floating[8] */
1237 /* Fill in the __va_ctl. */
1238 asm (" st.l sp, 80(sp)"); /* __va_ctl points to __va_struct. */
1239 asm (" st.l r28,84(sp)"); /* pointer to more args */
1240 asm (" st.l r0, 88(sp)"); /* nfixed */
1241 asm (" st.l r0, 92(sp)"); /* nfloating */
1243 asm (" adds 80,sp,r16"); /* return address of the __va_ctl. */
1245 asm (" mov r30,sp");
1246 /* recover stack and pass address to start
1248 #endif /* not __PARAGON__ */
1249 #endif /* not __svr4__ */
1250 #else /* not __i860__ */
1252 asm (".global __builtin_saveregs");
1253 asm ("__builtin_saveregs:");
1254 asm (".global ___builtin_saveregs");
1255 asm ("___builtin_saveregs:");
1256 #ifdef NEED_PROC_COMMAND
1259 asm ("st %i0,[%fp+68]");
1260 asm ("st %i1,[%fp+72]");
1261 asm ("st %i2,[%fp+76]");
1262 asm ("st %i3,[%fp+80]");
1263 asm ("st %i4,[%fp+84]");
1265 asm ("st %i5,[%fp+88]");
1266 #ifdef NEED_TYPE_COMMAND
1267 asm (".type __builtin_saveregs,#function");
1268 asm (".size __builtin_saveregs,.-__builtin_saveregs");
1270 #else /* not __sparc__ */
1271 #if defined(__MIPSEL__) | defined(__R3000__) | defined(__R2000__) | defined(__mips__)
1275 asm (" .set nomips16");
1277 asm (" .ent __builtin_saveregs");
1278 asm (" .globl __builtin_saveregs");
1279 asm ("__builtin_saveregs:");
1280 asm (" sw $4,0($30)");
1281 asm (" sw $5,4($30)");
1282 asm (" sw $6,8($30)");
1283 asm (" sw $7,12($30)");
1285 asm (" .end __builtin_saveregs");
1286 #else /* not __mips__, etc. */
1288 void * __attribute__ ((__noreturn__
))
1289 __builtin_saveregs (void)
1294 #endif /* not __mips__ */
1295 #endif /* not __sparc__ */
1296 #endif /* not __i860__ */
1300 #ifndef inhibit_libc
1302 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1304 /* This is used by the `assert' macro. */
1306 __eprintf (const char *string
, const char *expression
,
1307 unsigned int line
, const char *filename
)
1309 fprintf (stderr
, string
, expression
, line
, filename
);
1319 /* Structure emitted by -a */
1323 const char *filename
;
1327 const unsigned long *addresses
;
1329 /* Older GCC's did not emit these fields. */
1331 const char **functions
;
1332 const long *line_nums
;
1333 const char **filenames
;
1337 #ifdef BLOCK_PROFILER_CODE
1340 #ifndef inhibit_libc
1342 /* Simple minded basic block profiling output dumper for
1343 systems that don't provide tcov support. At present,
1344 it requires atexit and stdio. */
1346 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
1350 #include "gbl-ctors.h"
1351 #include "gcov-io.h"
1353 #ifdef TARGET_HAS_F_SETLKW
1358 static struct bb
*bb_head
;
1360 static int num_digits (long value
, int base
) __attribute__ ((const));
1362 /* Return the number of digits needed to print a value */
1363 /* __inline__ */ static int num_digits (long value
, int base
)
1365 int minus
= (value
< 0 && base
!= 16);
1366 unsigned long v
= (minus
) ? -value
: value
;
1380 __bb_exit_func (void)
1382 FILE *da_file
, *file
;
1389 i
= strlen (bb_head
->filename
) - 3;
1391 if (!strcmp (bb_head
->filename
+i
, ".da"))
1393 /* Must be -fprofile-arcs not -a.
1394 Dump data in a form that gcov expects. */
1398 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1402 /* Make sure the output file exists -
1403 but don't clobber exiting data. */
1404 if ((da_file
= fopen (ptr
->filename
, "a")) != 0)
1407 /* Need to re-open in order to be able to write from the start. */
1408 da_file
= fopen (ptr
->filename
, "r+b");
1409 /* Some old systems might not allow the 'b' mode modifier.
1410 Therefore, try to open without it. This can lead to a race
1411 condition so that when you delete and re-create the file, the
1412 file might be opened in text mode, but then, you shouldn't
1413 delete the file in the first place. */
1415 da_file
= fopen (ptr
->filename
, "r+");
1418 fprintf (stderr
, "arc profiling: Can't open output file %s.\n",
1423 /* After a fork, another process might try to read and/or write
1424 the same file simultanously. So if we can, lock the file to
1425 avoid race conditions. */
1426 #if defined (TARGET_HAS_F_SETLKW)
1428 struct flock s_flock
;
1430 s_flock
.l_type
= F_WRLCK
;
1431 s_flock
.l_whence
= SEEK_SET
;
1432 s_flock
.l_start
= 0;
1434 s_flock
.l_pid
= getpid ();
1436 while (fcntl (fileno (da_file
), F_SETLKW
, &s_flock
)
1441 /* If the file is not empty, and the number of counts in it is the
1442 same, then merge them in. */
1443 firstchar
= fgetc (da_file
);
1444 if (firstchar
== EOF
)
1446 if (ferror (da_file
))
1448 fprintf (stderr
, "arc profiling: Can't read output file ");
1449 perror (ptr
->filename
);
1456 if (ungetc (firstchar
, da_file
) == EOF
)
1458 if (__read_long (&n_counts
, da_file
, 8) != 0)
1460 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1465 if (n_counts
== ptr
->ncounts
)
1469 for (i
= 0; i
< n_counts
; i
++)
1473 if (__read_long (&v
, da_file
, 8) != 0)
1475 fprintf (stderr
, "arc profiling: Can't read output file %s.\n",
1479 ptr
->counts
[i
] += v
;
1487 /* ??? Should first write a header to the file. Preferably, a 4 byte
1488 magic number, 4 bytes containing the time the program was
1489 compiled, 4 bytes containing the last modification time of the
1490 source file, and 4 bytes indicating the compiler options used.
1492 That way we can easily verify that the proper source/executable/
1493 data file combination is being used from gcov. */
1495 if (__write_long (ptr
->ncounts
, da_file
, 8) != 0)
1498 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1504 long *count_ptr
= ptr
->counts
;
1506 for (j
= ptr
->ncounts
; j
> 0; j
--)
1508 if (__write_long (*count_ptr
, da_file
, 8) != 0)
1516 fprintf (stderr
, "arc profiling: Error writing output file %s.\n",
1520 if (fclose (da_file
) == EOF
)
1521 fprintf (stderr
, "arc profiling: Error closing output file %s.\n",
1528 /* Must be basic block profiling. Emit a human readable output file. */
1530 file
= fopen ("bb.out", "a");
1539 /* This is somewhat type incorrect, but it avoids worrying about
1540 exactly where time.h is included from. It should be ok unless
1541 a void * differs from other pointer formats, or if sizeof (long)
1542 is < sizeof (time_t). It would be nice if we could assume the
1543 use of rationale standards here. */
1545 time ((void *) &time_value
);
1546 fprintf (file
, "Basic block profiling finished on %s\n", ctime ((void *) &time_value
));
1548 /* We check the length field explicitly in order to allow compatibility
1549 with older GCC's which did not provide it. */
1551 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1554 int func_p
= (ptr
->nwords
>= (long) sizeof (struct bb
)
1555 && ptr
->nwords
<= 1000
1557 int line_p
= (func_p
&& ptr
->line_nums
);
1558 int file_p
= (func_p
&& ptr
->filenames
);
1559 int addr_p
= (ptr
->addresses
!= 0);
1560 long ncounts
= ptr
->ncounts
;
1566 int blk_len
= num_digits (ncounts
, 10);
1571 fprintf (file
, "File %s, %ld basic blocks \n\n",
1572 ptr
->filename
, ncounts
);
1574 /* Get max values for each field. */
1575 for (i
= 0; i
< ncounts
; i
++)
1580 if (cnt_max
< ptr
->counts
[i
])
1581 cnt_max
= ptr
->counts
[i
];
1583 if (addr_p
&& (unsigned long) addr_max
< ptr
->addresses
[i
])
1584 addr_max
= ptr
->addresses
[i
];
1586 if (line_p
&& line_max
< ptr
->line_nums
[i
])
1587 line_max
= ptr
->line_nums
[i
];
1591 p
= (ptr
->functions
[i
]) ? (ptr
->functions
[i
]) : "<none>";
1599 p
= (ptr
->filenames
[i
]) ? (ptr
->filenames
[i
]) : "<none>";
1606 addr_len
= num_digits (addr_max
, 16);
1607 cnt_len
= num_digits (cnt_max
, 10);
1608 line_len
= num_digits (line_max
, 10);
1610 /* Now print out the basic block information. */
1611 for (i
= 0; i
< ncounts
; i
++)
1614 " Block #%*d: executed %*ld time(s)",
1616 cnt_len
, ptr
->counts
[i
]);
1619 fprintf (file
, " address= 0x%.*lx", addr_len
,
1623 fprintf (file
, " function= %-*s", func_len
,
1624 (ptr
->functions
[i
]) ? ptr
->functions
[i
] : "<none>");
1627 fprintf (file
, " line= %*ld", line_len
, ptr
->line_nums
[i
]);
1630 fprintf (file
, " file= %s",
1631 (ptr
->filenames
[i
]) ? ptr
->filenames
[i
] : "<none>");
1633 fprintf (file
, "\n");
1636 fprintf (file
, "\n");
1640 fprintf (file
, "\n\n");
1646 __bb_init_func (struct bb
*blocks
)
1648 /* User is supposed to check whether the first word is non-0,
1649 but just in case.... */
1651 if (blocks
->zero_word
)
1654 /* Initialize destructor. */
1656 atexit (__bb_exit_func
);
1658 /* Set up linked list. */
1659 blocks
->zero_word
= 1;
1660 blocks
->next
= bb_head
;
1664 /* Called before fork or exec - write out profile information gathered so
1665 far and reset it to zero. This avoids duplication or loss of the
1666 profile information gathered so far. */
1668 __bb_fork_func (void)
1673 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1676 for (i
= ptr
->ncounts
- 1; i
>= 0; i
--)
1681 #ifndef MACHINE_STATE_SAVE
1682 #define MACHINE_STATE_SAVE(ID)
1684 #ifndef MACHINE_STATE_RESTORE
1685 #define MACHINE_STATE_RESTORE(ID)
1688 /* Number of buckets in hashtable of basic block addresses. */
1690 #define BB_BUCKETS 311
1692 /* Maximum length of string in file bb.in. */
1694 #define BBINBUFSIZE 500
1698 struct bb_edge
*next
;
1699 unsigned long src_addr
;
1700 unsigned long dst_addr
;
1701 unsigned long count
;
1706 TRACE_KEEP
= 0, TRACE_ON
= 1, TRACE_OFF
= 2
1711 struct bb_func
*next
;
1714 enum bb_func_mode mode
;
1717 /* This is the connection to the outside world.
1718 The BLOCK_PROFILER macro must set __bb.blocks
1719 and __bb.blockno. */
1722 unsigned long blockno
;
1726 /* Vars to store addrs of source and destination basic blocks
1729 static unsigned long bb_src
= 0;
1730 static unsigned long bb_dst
= 0;
1732 static FILE *bb_tracefile
= (FILE *) 0;
1733 static struct bb_edge
**bb_hashbuckets
= (struct bb_edge
**) 0;
1734 static struct bb_func
*bb_func_head
= (struct bb_func
*) 0;
1735 static unsigned long bb_callcount
= 0;
1736 static int bb_mode
= 0;
1738 static unsigned long *bb_stack
= (unsigned long *) 0;
1739 static size_t bb_stacksize
= 0;
1741 static int reported
= 0;
1744 Always : Print execution frequencies of basic blocks
1746 bb_mode & 1 != 0 : Dump trace of basic blocks to file bbtrace[.gz]
1747 bb_mode & 2 != 0 : Print jump frequencies to file bb.out.
1748 bb_mode & 4 != 0 : Cut call instructions from basic block flow.
1749 bb_mode & 8 != 0 : Insert return instructions in basic block flow.
1754 /*#include <sys/types.h>*/
1755 #include <sys/stat.h>
1756 /*#include <malloc.h>*/
1758 /* Commands executed by gopen. */
1760 #define GOPENDECOMPRESS "gzip -cd "
1761 #define GOPENCOMPRESS "gzip -c >"
1763 /* Like fopen but pipes through gzip. mode may only be "r" or "w".
1764 If it does not compile, simply replace gopen by fopen and delete
1765 '.gz' from any first parameter to gopen. */
1768 gopen (char *fn
, char *mode
)
1776 if (mode
[0] != 'r' && mode
[0] != 'w')
1779 p
= fn
+ strlen (fn
)-1;
1780 use_gzip
= ((p
[-1] == '.' && (p
[0] == 'Z' || p
[0] == 'z'))
1781 || (p
[-2] == '.' && p
[-1] == 'g' && p
[0] == 'z'));
1788 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1789 + sizeof (GOPENDECOMPRESS
));
1790 strcpy (s
, GOPENDECOMPRESS
);
1791 strcpy (s
+ (sizeof (GOPENDECOMPRESS
)-1), fn
);
1792 f
= popen (s
, mode
);
1800 char *s
= (char *) malloc (sizeof (char) * strlen (fn
)
1801 + sizeof (GOPENCOMPRESS
));
1802 strcpy (s
, GOPENCOMPRESS
);
1803 strcpy (s
+ (sizeof (GOPENCOMPRESS
)-1), fn
);
1804 if (!(f
= popen (s
, mode
)))
1805 f
= fopen (s
, mode
);
1812 return fopen (fn
, mode
);
1822 if (!fstat (fileno (f
), &buf
) && S_ISFIFO (buf
.st_mode
))
1830 #endif /* HAVE_POPEN */
1832 /* Called once per program. */
1835 __bb_exit_trace_func (void)
1837 FILE *file
= fopen ("bb.out", "a");
1850 gclose (bb_tracefile
);
1852 fclose (bb_tracefile
);
1853 #endif /* HAVE_POPEN */
1856 /* Check functions in `bb.in'. */
1861 const struct bb_func
*p
;
1862 int printed_something
= 0;
1866 /* This is somewhat type incorrect. */
1867 time ((void *) &time_value
);
1869 for (p
= bb_func_head
; p
!= (struct bb_func
*) 0; p
= p
->next
)
1871 for (ptr
= bb_head
; ptr
!= (struct bb
*) 0; ptr
= ptr
->next
)
1873 if (!ptr
->filename
|| (p
->filename
!= (char *) 0 && strcmp (p
->filename
, ptr
->filename
)))
1875 for (blk
= 0; blk
< ptr
->ncounts
; blk
++)
1877 if (!strcmp (p
->funcname
, ptr
->functions
[blk
]))
1882 if (!printed_something
)
1884 fprintf (file
, "Functions in `bb.in' not executed during basic block profiling on %s\n", ctime ((void *) &time_value
));
1885 printed_something
= 1;
1888 fprintf (file
, "\tFunction %s", p
->funcname
);
1890 fprintf (file
, " of file %s", p
->filename
);
1891 fprintf (file
, "\n" );
1896 if (printed_something
)
1897 fprintf (file
, "\n");
1903 if (!bb_hashbuckets
)
1907 fprintf (stderr
, "Profiler: out of memory\n");
1917 unsigned long addr_max
= 0;
1918 unsigned long cnt_max
= 0;
1922 /* This is somewhat type incorrect, but it avoids worrying about
1923 exactly where time.h is included from. It should be ok unless
1924 a void * differs from other pointer formats, or if sizeof (long)
1925 is < sizeof (time_t). It would be nice if we could assume the
1926 use of rationale standards here. */
1928 time ((void *) &time_value
);
1929 fprintf (file
, "Basic block jump tracing");
1931 switch (bb_mode
& 12)
1934 fprintf (file
, " (with call)");
1938 /* Print nothing. */
1942 fprintf (file
, " (with call & ret)");
1946 fprintf (file
, " (with ret)");
1950 fprintf (file
, " finished on %s\n", ctime ((void *) &time_value
));
1952 for (i
= 0; i
< BB_BUCKETS
; i
++)
1954 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
1955 for ( ; bucket
; bucket
= bucket
->next
)
1957 if (addr_max
< bucket
->src_addr
)
1958 addr_max
= bucket
->src_addr
;
1959 if (addr_max
< bucket
->dst_addr
)
1960 addr_max
= bucket
->dst_addr
;
1961 if (cnt_max
< bucket
->count
)
1962 cnt_max
= bucket
->count
;
1965 addr_len
= num_digits (addr_max
, 16);
1966 cnt_len
= num_digits (cnt_max
, 10);
1968 for ( i
= 0; i
< BB_BUCKETS
; i
++)
1970 struct bb_edge
*bucket
= bb_hashbuckets
[i
];
1971 for ( ; bucket
; bucket
= bucket
->next
)
1974 "Jump from block 0x%.*lx to block 0x%.*lx executed %*lu time(s)\n",
1975 addr_len
, bucket
->src_addr
,
1976 addr_len
, bucket
->dst_addr
,
1977 cnt_len
, bucket
->count
);
1981 fprintf (file
, "\n");
1989 /* Free allocated memory. */
1994 struct bb_func
*old
= f
;
1997 if (old
->funcname
) free (old
->funcname
);
1998 if (old
->filename
) free (old
->filename
);
2009 for (i
= 0; i
< BB_BUCKETS
; i
++)
2011 struct bb_edge
*old
, *bucket
= bb_hashbuckets
[i
];
2016 bucket
= bucket
->next
;
2020 free (bb_hashbuckets
);
2023 for (b
= bb_head
; b
; b
= b
->next
)
2024 if (b
->flags
) free (b
->flags
);
2027 /* Called once per program. */
2030 __bb_init_prg (void)
2033 char buf
[BBINBUFSIZE
];
2036 enum bb_func_mode m
;
2039 /* Initialize destructor. */
2040 atexit (__bb_exit_func
);
2042 if (!(file
= fopen ("bb.in", "r")))
2045 while(fgets (buf
, BBINBUFSIZE
, file
) != 0)
2061 if (!strcmp (p
, "__bb_trace__"))
2063 else if (!strcmp (p
, "__bb_jumps__"))
2065 else if (!strcmp (p
, "__bb_hidecall__"))
2067 else if (!strcmp (p
, "__bb_showret__"))
2071 struct bb_func
*f
= (struct bb_func
*) malloc (sizeof (struct bb_func
));
2075 f
->next
= bb_func_head
;
2076 if ((pos
= strchr (p
, ':')))
2078 if (!(f
->funcname
= (char *) malloc (strlen (pos
+1)+1)))
2080 strcpy (f
->funcname
, pos
+1);
2082 if ((f
->filename
= (char *) malloc (l
+1)))
2084 strncpy (f
->filename
, p
, l
);
2085 f
->filename
[l
] = '\0';
2088 f
->filename
= (char *) 0;
2092 if (!(f
->funcname
= (char *) malloc (strlen (p
)+1)))
2094 strcpy (f
->funcname
, p
);
2095 f
->filename
= (char *) 0;
2107 bb_tracefile
= gopen ("bbtrace.gz", "w");
2112 bb_tracefile
= fopen ("bbtrace", "w");
2114 #endif /* HAVE_POPEN */
2118 bb_hashbuckets
= (struct bb_edge
**)
2119 malloc (BB_BUCKETS
* sizeof (struct bb_edge
*));
2121 /* Use a loop here rather than calling bzero to avoid having to
2122 conditionalize its existance. */
2123 for (i
= 0; i
< BB_BUCKETS
; i
++)
2124 bb_hashbuckets
[i
] = 0;
2130 bb_stack
= (unsigned long *) malloc (bb_stacksize
* sizeof (*bb_stack
));
2133 /* Initialize destructor. */
2134 atexit (__bb_exit_trace_func
);
2137 /* Called upon entering a basic block. */
2140 __bb_trace_func (void)
2142 struct bb_edge
*bucket
;
2144 MACHINE_STATE_SAVE("1")
2146 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2149 bb_dst
= __bb
.blocks
->addresses
[__bb
.blockno
];
2150 __bb
.blocks
->counts
[__bb
.blockno
]++;
2154 fwrite (&bb_dst
, sizeof (unsigned long), 1, bb_tracefile
);
2159 struct bb_edge
**startbucket
, **oldnext
;
2161 oldnext
= startbucket
2162 = & bb_hashbuckets
[ (((int) bb_src
*8) ^ (int) bb_dst
) % BB_BUCKETS
];
2163 bucket
= *startbucket
;
2165 for (bucket
= *startbucket
; bucket
;
2166 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2168 if (bucket
->src_addr
== bb_src
2169 && bucket
->dst_addr
== bb_dst
)
2172 *oldnext
= bucket
->next
;
2173 bucket
->next
= *startbucket
;
2174 *startbucket
= bucket
;
2179 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2185 fprintf (stderr
, "Profiler: out of memory\n");
2192 bucket
->src_addr
= bb_src
;
2193 bucket
->dst_addr
= bb_dst
;
2194 bucket
->next
= *startbucket
;
2195 *startbucket
= bucket
;
2206 MACHINE_STATE_RESTORE("1")
2210 /* Called when returning from a function and `__bb_showret__' is set. */
2213 __bb_trace_func_ret (void)
2215 struct bb_edge
*bucket
;
2217 if (!bb_callcount
|| (__bb
.blocks
->flags
&& (__bb
.blocks
->flags
[__bb
.blockno
] & TRACE_OFF
)))
2222 struct bb_edge
**startbucket
, **oldnext
;
2224 oldnext
= startbucket
2225 = & bb_hashbuckets
[ (((int) bb_dst
* 8) ^ (int) bb_src
) % BB_BUCKETS
];
2226 bucket
= *startbucket
;
2228 for (bucket
= *startbucket
; bucket
;
2229 oldnext
= &(bucket
->next
), bucket
= *oldnext
)
2231 if (bucket
->src_addr
== bb_dst
2232 && bucket
->dst_addr
== bb_src
)
2235 *oldnext
= bucket
->next
;
2236 bucket
->next
= *startbucket
;
2237 *startbucket
= bucket
;
2242 bucket
= (struct bb_edge
*) malloc (sizeof (struct bb_edge
));
2248 fprintf (stderr
, "Profiler: out of memory\n");
2255 bucket
->src_addr
= bb_dst
;
2256 bucket
->dst_addr
= bb_src
;
2257 bucket
->next
= *startbucket
;
2258 *startbucket
= bucket
;
2271 /* Called upon entering the first function of a file. */
2274 __bb_init_file (struct bb
*blocks
)
2277 const struct bb_func
*p
;
2278 long blk
, ncounts
= blocks
->ncounts
;
2279 const char **functions
= blocks
->functions
;
2281 /* Set up linked list. */
2282 blocks
->zero_word
= 1;
2283 blocks
->next
= bb_head
;
2288 || !(blocks
->flags
= (char *) malloc (sizeof (char) * blocks
->ncounts
)))
2291 for (blk
= 0; blk
< ncounts
; blk
++)
2292 blocks
->flags
[blk
] = 0;
2294 for (blk
= 0; blk
< ncounts
; blk
++)
2296 for (p
= bb_func_head
; p
; p
= p
->next
)
2298 if (!strcmp (p
->funcname
, functions
[blk
])
2299 && (!p
->filename
|| !strcmp (p
->filename
, blocks
->filename
)))
2301 blocks
->flags
[blk
] |= p
->mode
;
2308 /* Called when exiting from a function. */
2311 __bb_trace_ret (void)
2314 MACHINE_STATE_SAVE("2")
2318 if ((bb_mode
& 12) && bb_stacksize
> bb_callcount
)
2320 bb_src
= bb_stack
[bb_callcount
];
2322 __bb_trace_func_ret ();
2328 MACHINE_STATE_RESTORE("2")
2332 /* Called when entering a function. */
2335 __bb_init_trace_func (struct bb
*blocks
, unsigned long blockno
)
2337 static int trace_init
= 0;
2339 MACHINE_STATE_SAVE("3")
2341 if (!blocks
->zero_word
)
2348 __bb_init_file (blocks
);
2358 if (bb_callcount
>= bb_stacksize
)
2360 size_t newsize
= bb_callcount
+ 100;
2362 bb_stack
= (unsigned long *) realloc (bb_stack
, newsize
);
2367 fprintf (stderr
, "Profiler: out of memory\n");
2371 goto stack_overflow
;
2373 bb_stacksize
= newsize
;
2375 bb_stack
[bb_callcount
] = bb_src
;
2386 else if (blocks
->flags
&& (blocks
->flags
[blockno
] & TRACE_ON
))
2392 bb_stack
[bb_callcount
] = bb_src
;
2395 MACHINE_STATE_RESTORE("3")
2398 #endif /* not inhibit_libc */
2399 #endif /* not BLOCK_PROFILER_CODE */
2403 unsigned int __shtab
[] = {
2404 0x00000001, 0x00000002, 0x00000004, 0x00000008,
2405 0x00000010, 0x00000020, 0x00000040, 0x00000080,
2406 0x00000100, 0x00000200, 0x00000400, 0x00000800,
2407 0x00001000, 0x00002000, 0x00004000, 0x00008000,
2408 0x00010000, 0x00020000, 0x00040000, 0x00080000,
2409 0x00100000, 0x00200000, 0x00400000, 0x00800000,
2410 0x01000000, 0x02000000, 0x04000000, 0x08000000,
2411 0x10000000, 0x20000000, 0x40000000, 0x80000000
2415 #ifdef L_clear_cache
2416 /* Clear part of an instruction cache. */
2418 #define INSN_CACHE_PLANE_SIZE (INSN_CACHE_SIZE / INSN_CACHE_DEPTH)
2421 __clear_cache (char *beg
__attribute__((__unused__
)),
2422 char *end
__attribute__((__unused__
)))
2424 #ifdef CLEAR_INSN_CACHE
2425 CLEAR_INSN_CACHE (beg
, end
);
2427 #ifdef INSN_CACHE_SIZE
2428 static char array
[INSN_CACHE_SIZE
+ INSN_CACHE_PLANE_SIZE
+ INSN_CACHE_LINE_WIDTH
];
2429 static int initialized
;
2433 typedef (*function_ptr
) (void);
2435 #if (INSN_CACHE_SIZE / INSN_CACHE_LINE_WIDTH) < 16
2436 /* It's cheaper to clear the whole cache.
2437 Put in a series of jump instructions so that calling the beginning
2438 of the cache will clear the whole thing. */
2442 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2443 & -INSN_CACHE_LINE_WIDTH
);
2444 int end_ptr
= ptr
+ INSN_CACHE_SIZE
;
2446 while (ptr
< end_ptr
)
2448 *(INSTRUCTION_TYPE
*)ptr
2449 = JUMP_AHEAD_INSTRUCTION
+ INSN_CACHE_LINE_WIDTH
;
2450 ptr
+= INSN_CACHE_LINE_WIDTH
;
2452 *(INSTRUCTION_TYPE
*) (ptr
- INSN_CACHE_LINE_WIDTH
) = RETURN_INSTRUCTION
;
2457 /* Call the beginning of the sequence. */
2458 (((function_ptr
) (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2459 & -INSN_CACHE_LINE_WIDTH
))
2462 #else /* Cache is large. */
2466 int ptr
= (((int) array
+ INSN_CACHE_LINE_WIDTH
- 1)
2467 & -INSN_CACHE_LINE_WIDTH
);
2469 while (ptr
< (int) array
+ sizeof array
)
2471 *(INSTRUCTION_TYPE
*)ptr
= RETURN_INSTRUCTION
;
2472 ptr
+= INSN_CACHE_LINE_WIDTH
;
2478 /* Find the location in array that occupies the same cache line as BEG. */
2480 offset
= ((int) beg
& -INSN_CACHE_LINE_WIDTH
) & (INSN_CACHE_PLANE_SIZE
- 1);
2481 start_addr
= (((int) (array
+ INSN_CACHE_PLANE_SIZE
- 1)
2482 & -INSN_CACHE_PLANE_SIZE
)
2485 /* Compute the cache alignment of the place to stop clearing. */
2486 #if 0 /* This is not needed for gcc's purposes. */
2487 /* If the block to clear is bigger than a cache plane,
2488 we clear the entire cache, and OFFSET is already correct. */
2489 if (end
< beg
+ INSN_CACHE_PLANE_SIZE
)
2491 offset
= (((int) (end
+ INSN_CACHE_LINE_WIDTH
- 1)
2492 & -INSN_CACHE_LINE_WIDTH
)
2493 & (INSN_CACHE_PLANE_SIZE
- 1));
2495 #if INSN_CACHE_DEPTH > 1
2496 end_addr
= (start_addr
& -INSN_CACHE_PLANE_SIZE
) + offset
;
2497 if (end_addr
<= start_addr
)
2498 end_addr
+= INSN_CACHE_PLANE_SIZE
;
2500 for (plane
= 0; plane
< INSN_CACHE_DEPTH
; plane
++)
2502 int addr
= start_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2503 int stop
= end_addr
+ plane
* INSN_CACHE_PLANE_SIZE
;
2505 while (addr
!= stop
)
2507 /* Call the return instruction at ADDR. */
2508 ((function_ptr
) addr
) ();
2510 addr
+= INSN_CACHE_LINE_WIDTH
;
2513 #else /* just one plane */
2516 /* Call the return instruction at START_ADDR. */
2517 ((function_ptr
) start_addr
) ();
2519 start_addr
+= INSN_CACHE_LINE_WIDTH
;
2521 while ((start_addr
% INSN_CACHE_SIZE
) != offset
);
2522 #endif /* just one plane */
2523 #endif /* Cache is large */
2524 #endif /* Cache exists */
2525 #endif /* CLEAR_INSN_CACHE */
2528 #endif /* L_clear_cache */
2532 /* Jump to a trampoline, loading the static chain address. */
2534 #if defined(WINNT) && ! defined(__CYGWIN__) && ! defined (_UWIN)
2547 extern int VirtualProtect (char *, int, int, int *) __attribute__((stdcall));
2551 mprotect (char *addr
, int len
, int prot
)
2568 if (VirtualProtect (addr
, len
, np
, &op
))
2574 #endif /* WINNT && ! __CYGWIN__ && ! _UWIN */
2576 #ifdef TRANSFER_FROM_TRAMPOLINE
2577 TRANSFER_FROM_TRAMPOLINE
2580 #if defined (NeXT) && defined (__MACH__)
2582 /* Make stack executable so we can call trampolines on stack.
2583 This is called from INITIALIZE_TRAMPOLINE in next.h. */
2587 #include <mach/mach.h>
2591 __enable_execute_stack (char *addr
)
2594 char *eaddr
= addr
+ TRAMPOLINE_SIZE
;
2595 vm_address_t a
= (vm_address_t
) addr
;
2597 /* turn on execute access on stack */
2598 r
= vm_protect (task_self (), a
, TRAMPOLINE_SIZE
, FALSE
, VM_PROT_ALL
);
2599 if (r
!= KERN_SUCCESS
)
2601 mach_error("vm_protect VM_PROT_ALL", r
);
2605 /* We inline the i-cache invalidation for speed */
2607 #ifdef CLEAR_INSN_CACHE
2608 CLEAR_INSN_CACHE (addr
, eaddr
);
2610 __clear_cache ((int) addr
, (int) eaddr
);
2614 #endif /* defined (NeXT) && defined (__MACH__) */
2618 /* Make stack executable so we can call trampolines on stack.
2619 This is called from INITIALIZE_TRAMPOLINE in convex.h. */
2621 #include <sys/mman.h>
2622 #include <sys/vmparam.h>
2623 #include <machine/machparam.h>
2626 __enable_execute_stack (void)
2629 static unsigned lowest
= USRSTACK
;
2630 unsigned current
= (unsigned) &fp
& -NBPG
;
2632 if (lowest
> current
)
2634 unsigned len
= lowest
- current
;
2635 mremap (current
, &len
, PROT_READ
| PROT_WRITE
| PROT_EXEC
, MAP_PRIVATE
);
2639 /* Clear instruction cache in case an old trampoline is in it. */
2642 #endif /* __convex__ */
2646 /* Modified from the convex -code above. */
2648 #include <sys/param.h>
2650 #include <sys/m88kbcs.h>
2653 __enable_execute_stack (void)
2656 static unsigned long lowest
= USRSTACK
;
2657 unsigned long current
= (unsigned long) &save_errno
& -NBPC
;
2659 /* Ignore errno being set. memctl sets errno to EINVAL whenever the
2660 address is seen as 'negative'. That is the case with the stack. */
2663 if (lowest
> current
)
2665 unsigned len
=lowest
-current
;
2666 memctl(current
,len
,MCT_TEXT
);
2670 memctl(current
,NBPC
,MCT_TEXT
);
2674 #endif /* __sysV88__ */
2678 #include <sys/signal.h>
2681 /* Motorola forgot to put memctl.o in the libp version of libc881.a,
2682 so define it here, because we need it in __clear_insn_cache below */
2683 /* On older versions of this OS, no memctl or MCT_TEXT are defined;
2684 hence we enable this stuff only if MCT_TEXT is #define'd. */
2699 /* Clear instruction cache so we can call trampolines on stack.
2700 This is called from FINALIZE_TRAMPOLINE in mot3300.h. */
2703 __clear_insn_cache (void)
2708 /* Preserve errno, because users would be surprised to have
2709 errno changing without explicitly calling any system-call. */
2712 /* Keep it simple : memctl (MCT_TEXT) always fully clears the insn cache.
2713 No need to use an address derived from _start or %sp, as 0 works also. */
2714 memctl(0, 4096, MCT_TEXT
);
2719 #endif /* __sysV68__ */
2723 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
2725 #include <sys/mman.h>
2726 #include <sys/types.h>
2727 #include <sys/param.h>
2728 #include <sys/vmmac.h>
2730 /* Modified from the convex -code above.
2731 mremap promises to clear the i-cache. */
2734 __enable_execute_stack (void)
2737 if (mprotect (((unsigned int)&fp
/PAGSIZ
)*PAGSIZ
, PAGSIZ
,
2738 PROT_READ
|PROT_WRITE
|PROT_EXEC
))
2740 perror ("mprotect in __enable_execute_stack");
2745 #endif /* __pyr__ */
2747 #if defined (sony_news) && defined (SYSTYPE_BSD)
2750 #include <sys/types.h>
2751 #include <sys/param.h>
2752 #include <syscall.h>
2753 #include <machine/sysnews.h>
2755 /* cacheflush function for NEWS-OS 4.2.
2756 This function is called from trampoline-initialize code
2757 defined in config/mips/mips.h. */
2760 cacheflush (char *beg
, int size
, int flag
)
2762 if (syscall (SYS_sysnews
, NEWS_CACHEFLUSH
, beg
, size
, FLUSH_BCACHE
))
2764 perror ("cache_flush");
2770 #endif /* sony_news */
2771 #endif /* L_trampoline */
2776 #include "gbl-ctors.h"
2777 /* Some systems use __main in a way incompatible with its use in gcc, in these
2778 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
2779 give the same symbol without quotes for an alternative entry point. You
2780 must define both, or neither. */
2782 #define NAME__MAIN "__main"
2783 #define SYMBOL__MAIN __main
2786 #ifdef INIT_SECTION_ASM_OP
2787 #undef HAS_INIT_SECTION
2788 #define HAS_INIT_SECTION
2791 #if !defined (HAS_INIT_SECTION) || !defined (OBJECT_FORMAT_ELF)
2793 /* Some ELF crosses use crtstuff.c to provide __CTOR_LIST__, but use this
2794 code to run constructors. In that case, we need to handle EH here, too. */
2796 #ifdef EH_FRAME_SECTION
2798 extern unsigned char __EH_FRAME_BEGIN__
[];
2801 /* Run all the global destructors on exit from the program. */
2804 __do_global_dtors (void)
2806 #ifdef DO_GLOBAL_DTORS_BODY
2807 DO_GLOBAL_DTORS_BODY
;
2809 static func_ptr
*p
= __DTOR_LIST__
+ 1;
2816 #if defined (EH_FRAME_SECTION) && !defined (HAS_INIT_SECTION)
2818 static int completed
= 0;
2822 __deregister_frame_info (__EH_FRAME_BEGIN__
);
2829 #ifndef HAS_INIT_SECTION
2830 /* Run all the global constructors on entry to the program. */
2833 __do_global_ctors (void)
2835 #ifdef EH_FRAME_SECTION
2837 static struct object object
;
2838 __register_frame_info (__EH_FRAME_BEGIN__
, &object
);
2841 DO_GLOBAL_CTORS_BODY
;
2842 atexit (__do_global_dtors
);
2844 #endif /* no HAS_INIT_SECTION */
2846 #if !defined (HAS_INIT_SECTION) || defined (INVOKE__main)
2847 /* Subroutine called automatically by `main'.
2848 Compiling a global function named `main'
2849 produces an automatic call to this function at the beginning.
2851 For many systems, this routine calls __do_global_ctors.
2852 For systems which support a .init section we use the .init section
2853 to run __do_global_ctors, so we need not do anything here. */
2858 /* Support recursive calls to `main': run initializers just once. */
2859 static int initialized
;
2863 __do_global_ctors ();
2866 #endif /* no HAS_INIT_SECTION or INVOKE__main */
2868 #endif /* L__main */
2869 #endif /* __CYGWIN__ */
2873 #include "gbl-ctors.h"
2875 /* Provide default definitions for the lists of constructors and
2876 destructors, so that we don't get linker errors. These symbols are
2877 intentionally bss symbols, so that gld and/or collect will provide
2878 the right values. */
2880 /* We declare the lists here with two elements each,
2881 so that they are valid empty lists if no other definition is loaded.
2883 If we are using the old "set" extensions to have the gnu linker
2884 collect ctors and dtors, then we __CTOR_LIST__ and __DTOR_LIST__
2885 must be in the bss/common section.
2887 Long term no port should use those extensions. But many still do. */
2888 #if !defined(INIT_SECTION_ASM_OP) && !defined(CTOR_LISTS_DEFINED_EXTERNALLY)
2889 #if defined (ASM_OUTPUT_CONSTRUCTOR) || defined (USE_COLLECT2)
2890 func_ptr __CTOR_LIST__
[2] = {0, 0};
2891 func_ptr __DTOR_LIST__
[2] = {0, 0};
2893 func_ptr __CTOR_LIST__
[2];
2894 func_ptr __DTOR_LIST__
[2];
2896 #endif /* no INIT_SECTION_ASM_OP and not CTOR_LISTS_DEFINED_EXTERNALLY */
2897 #endif /* L_ctors */
2901 #include "gbl-ctors.h"
2909 static func_ptr
*atexit_chain
= 0;
2910 static long atexit_chain_length
= 0;
2911 static volatile long last_atexit_chain_slot
= -1;
2914 atexit (func_ptr func
)
2916 if (++last_atexit_chain_slot
== atexit_chain_length
)
2918 atexit_chain_length
+= 32;
2920 atexit_chain
= (func_ptr
*) realloc (atexit_chain
, atexit_chain_length
2921 * sizeof (func_ptr
));
2923 atexit_chain
= (func_ptr
*) malloc (atexit_chain_length
2924 * sizeof (func_ptr
));
2927 atexit_chain_length
= 0;
2928 last_atexit_chain_slot
= -1;
2933 atexit_chain
[last_atexit_chain_slot
] = func
;
2937 extern void _cleanup (void);
2938 extern void _exit (int) __attribute__ ((__noreturn__
));
2945 for ( ; last_atexit_chain_slot
-- >= 0; )
2947 (*atexit_chain
[last_atexit_chain_slot
+ 1]) ();
2948 atexit_chain
[last_atexit_chain_slot
+ 1] = 0;
2950 free (atexit_chain
);
2963 /* Simple; we just need a wrapper for ON_EXIT. */
2965 atexit (func_ptr func
)
2967 return ON_EXIT (func
);
2970 #endif /* ON_EXIT */
2971 #endif /* NEED_ATEXIT */
2979 /* Shared exception handling support routines. */
2982 __default_terminate (void)
2987 void (*__terminate_func
)(void) __attribute__ ((__noreturn__
)) =
2988 __default_terminate
;
2993 (*__terminate_func
)();
2997 __throw_type_match (void *catch_type
, void *throw_type
, void *obj
)
3000 printf ("__throw_type_match (): catch_type = %s, throw_type = %s\n",
3001 catch_type
, throw_type
);
3003 if (strcmp ((const char *)catch_type
, (const char *)throw_type
) == 0)
3014 /* Include definitions of EH context and table layout */
3016 #include "eh-common.h"
3017 #ifndef inhibit_libc
3021 /* Allocate and return a new EH context structure. */
3025 new_eh_context (void)
3027 struct eh_full_context
{
3028 struct eh_context c
;
3030 } *ehfc
= (struct eh_full_context
*) malloc (sizeof *ehfc
);
3035 memset (ehfc
, 0, sizeof *ehfc
);
3037 ehfc
->c
.dynamic_handler_chain
= (void **) ehfc
->top_elt
;
3039 /* This should optimize out entirely. This should always be true,
3040 but just in case it ever isn't, don't allow bogus code to be
3043 if ((void*)(&ehfc
->c
) != (void*)ehfc
)
3049 static __gthread_key_t eh_context_key
;
3051 /* Destructor for struct eh_context. */
3053 eh_context_free (void *ptr
)
3055 __gthread_key_dtor (eh_context_key
, ptr
);
3061 /* Pointer to function to return EH context. */
3063 static struct eh_context
*eh_context_initialize (void);
3064 static struct eh_context
*eh_context_static (void);
3066 static struct eh_context
*eh_context_specific (void);
3069 static struct eh_context
*(*get_eh_context
) (void) = &eh_context_initialize
;
3071 /* Routine to get EH context.
3072 This one will simply call the function pointer. */
3075 __get_eh_context (void)
3077 return (void *) (*get_eh_context
) ();
3080 /* Get and set the language specific info pointer. */
3083 __get_eh_info (void)
3085 struct eh_context
*eh
= (*get_eh_context
) ();
3089 #ifdef DWARF2_UNWIND_INFO
3090 static int dwarf_reg_size_table_initialized
= 0;
3091 static char dwarf_reg_size_table
[DWARF_FRAME_REGISTERS
];
3094 init_reg_size_table (void)
3096 __builtin_init_dwarf_reg_size_table (dwarf_reg_size_table
);
3097 dwarf_reg_size_table_initialized
= 1;
3103 eh_threads_initialize (void)
3105 /* Try to create the key. If it fails, revert to static method,
3106 otherwise start using thread specific EH contexts. */
3107 if (__gthread_key_create (&eh_context_key
, &eh_context_free
) == 0)
3108 get_eh_context
= &eh_context_specific
;
3110 get_eh_context
= &eh_context_static
;
3112 #endif /* no __GTHREADS */
3114 /* Initialize EH context.
3115 This will be called only once, since we change GET_EH_CONTEXT
3116 pointer to another routine. */
3118 static struct eh_context
*
3119 eh_context_initialize (void)
3123 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
3124 /* Make sure that get_eh_context does not point to us anymore.
3125 Some systems have dummy thread routines in their libc that
3126 return a success (Solaris 2.6 for example). */
3127 if (__gthread_once (&once
, eh_threads_initialize
) != 0
3128 || get_eh_context
== &eh_context_initialize
)
3130 /* Use static version of EH context. */
3131 get_eh_context
= &eh_context_static
;
3133 #ifdef DWARF2_UNWIND_INFO
3135 static __gthread_once_t once_regsizes
= __GTHREAD_ONCE_INIT
;
3136 if (__gthread_once (&once_regsizes
, init_reg_size_table
) != 0
3137 || ! dwarf_reg_size_table_initialized
)
3138 init_reg_size_table ();
3142 #else /* no __GTHREADS */
3144 /* Use static version of EH context. */
3145 get_eh_context
= &eh_context_static
;
3147 #ifdef DWARF2_UNWIND_INFO
3148 init_reg_size_table ();
3151 #endif /* no __GTHREADS */
3153 return (*get_eh_context
) ();
3156 /* Return a static EH context. */
3158 static struct eh_context
*
3159 eh_context_static (void)
3161 static struct eh_context eh
;
3162 static int initialized
;
3163 static void *top_elt
[2];
3168 memset (&eh
, 0, sizeof eh
);
3169 eh
.dynamic_handler_chain
= top_elt
;
3175 /* Return a thread specific EH context. */
3177 static struct eh_context
*
3178 eh_context_specific (void)
3180 struct eh_context
*eh
;
3181 eh
= (struct eh_context
*) __gthread_getspecific (eh_context_key
);
3184 eh
= new_eh_context ();
3185 if (__gthread_setspecific (eh_context_key
, (void *) eh
) != 0)
3191 #endif /* __GTHREADS */
3193 /* Support routines for setjmp/longjmp exception handling. */
3195 /* Calls to __sjthrow are generated by the compiler when an exception
3196 is raised when using the setjmp/longjmp exception handling codegen
3199 #ifdef DONT_USE_BUILTIN_SETJMP
3200 extern void longjmp (void *, int);
3203 /* Routine to get the head of the current thread's dynamic handler chain
3204 use for exception handling. */
3207 __get_dynamic_handler_chain (void)
3209 struct eh_context
*eh
= (*get_eh_context
) ();
3210 return &eh
->dynamic_handler_chain
;
3213 /* This is used to throw an exception when the setjmp/longjmp codegen
3214 method is used for exception handling.
3216 We call __terminate if there are no handlers left. Otherwise we run the
3217 cleanup actions off the dynamic cleanup stack, and pop the top of the
3218 dynamic handler chain, and use longjmp to transfer back to the associated
3224 struct eh_context
*eh
= (*get_eh_context
) ();
3225 void ***dhc
= &eh
->dynamic_handler_chain
;
3227 void (*func
)(void *, int);
3229 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3230 void ***cleanup
= (void***)&(*dhc
)[1];
3232 /* If there are any cleanups in the chain, run them now. */
3236 void **buf
= (void**)store
;
3241 #ifdef DONT_USE_BUILTIN_SETJMP
3242 if (! setjmp (&buf
[2]))
3244 if (! __builtin_setjmp (&buf
[2]))
3250 func
= (void(*)(void*, int))cleanup
[0][1];
3251 arg
= (void*)cleanup
[0][2];
3253 /* Update this before running the cleanup. */
3254 cleanup
[0] = (void **)cleanup
[0][0];
3267 /* We must call terminate if we try and rethrow an exception, when
3268 there is no exception currently active and when there are no
3270 if (! eh
->info
|| (*dhc
)[0] == 0)
3273 /* Find the jmpbuf associated with the top element of the dynamic
3274 handler chain. The jumpbuf starts two words into the buffer. */
3275 jmpbuf
= &(*dhc
)[2];
3277 /* Then we pop the top element off the dynamic handler chain. */
3278 *dhc
= (void**)(*dhc
)[0];
3280 /* And then we jump to the handler. */
3282 #ifdef DONT_USE_BUILTIN_SETJMP
3283 longjmp (jmpbuf
, 1);
3285 __builtin_longjmp (jmpbuf
, 1);
3289 /* Run cleanups on the dynamic cleanup stack for the current dynamic
3290 handler, then pop the handler off the dynamic handler stack, and
3291 then throw. This is used to skip the first handler, and transfer
3292 control to the next handler in the dynamic handler stack. */
3295 __sjpopnthrow (void)
3297 struct eh_context
*eh
= (*get_eh_context
) ();
3298 void ***dhc
= &eh
->dynamic_handler_chain
;
3299 void (*func
)(void *, int);
3301 /* The cleanup chain is one word into the buffer. Get the cleanup chain. */
3302 void ***cleanup
= (void***)&(*dhc
)[1];
3304 /* If there are any cleanups in the chain, run them now. */
3308 void **buf
= (void**)store
;
3313 #ifdef DONT_USE_BUILTIN_SETJMP
3314 if (! setjmp (&buf
[2]))
3316 if (! __builtin_setjmp (&buf
[2]))
3322 func
= (void(*)(void*, int))cleanup
[0][1];
3323 arg
= (void*)cleanup
[0][2];
3325 /* Update this before running the cleanup. */
3326 cleanup
[0] = (void **)cleanup
[0][0];
3339 /* Then we pop the top element off the dynamic handler chain. */
3340 *dhc
= (void**)(*dhc
)[0];
3345 /* Support code for all exception region-based exception handling. */
3348 __eh_rtime_match (void *rtime
)
3351 __eh_matcher matcher
;
3354 info
= *(__get_eh_info ());
3355 matcher
= ((__eh_info
*)info
)->match_function
;
3358 #ifndef inhibit_libc
3359 fprintf (stderr
, "Internal Compiler Bug: No runtime type matcher.");
3363 ret
= (*matcher
) (info
, rtime
, (void *)0);
3364 return (ret
!= NULL
);
3367 /* This value identifies the place from which an exception is being
3370 #ifdef EH_TABLE_LOOKUP
3376 #ifdef DWARF2_UNWIND_INFO
3378 /* Return the table version of an exception descriptor */
3381 __get_eh_table_version (exception_descriptor
*table
)
3383 return table
->lang
.version
;
3386 /* Return the originating table language of an exception descriptor */
3389 __get_eh_table_language (exception_descriptor
*table
)
3391 return table
->lang
.language
;
3394 /* This routine takes a PC and a pointer to the exception region TABLE for
3395 its translation unit, and returns the address of the exception handler
3396 associated with the closest exception table handler entry associated
3397 with that PC, or 0 if there are no table entries the PC fits in.
3399 In the advent of a tie, we have to give the last entry, as it represents
3403 old_find_exception_handler (void *pc
, old_exception_table
*table
)
3410 /* We can't do a binary search because the table isn't guaranteed
3411 to be sorted from function to function. */
3412 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
3414 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
3416 /* This can apply. Make sure it is at least as small as
3417 the previous best. */
3418 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
3419 && table
[pos
].start_region
>= table
[best
].start_region
))
3422 /* But it is sorted by starting PC within a function. */
3423 else if (best
>= 0 && table
[pos
].start_region
> pc
)
3427 return table
[best
].exception_handler
;
3433 /* find_exception_handler finds the correct handler, if there is one, to
3434 handle an exception.
3435 returns a pointer to the handler which controlled should be transferred
3436 to, or NULL if there is nothing left.
3438 PC - pc where the exception originates. If this is a rethrow,
3439 then this starts out as a pointer to the exception table
3440 entry we wish to rethrow out of.
3441 TABLE - exception table for the current module.
3442 EH_INFO - eh info pointer for this exception.
3443 RETHROW - 1 if this is a rethrow. (see incoming value of PC).
3444 CLEANUP - returned flag indicating whether this is a cleanup handler.
3447 find_exception_handler (void *pc
, exception_descriptor
*table
,
3448 __eh_info
*eh_info
, int rethrow
, int *cleanup
)
3451 void *retval
= NULL
;
3456 /* The new model assumed the table is sorted inner-most out so the
3457 first region we find which matches is the correct one */
3459 exception_table
*tab
= &(table
->table
[0]);
3461 /* Subtract 1 from the PC to avoid hitting the next region */
3464 /* pc is actually the region table entry to rethrow out of */
3465 pos
= ((exception_table
*) pc
) - tab
;
3466 pc
= ((exception_table
*) pc
)->end_region
- 1;
3468 /* The label is always on the LAST handler entry for a region,
3469 so we know the next entry is a different region, even if the
3470 addresses are the same. Make sure its not end of table tho. */
3471 if (tab
[pos
].start_region
!= (void *) -1)
3477 /* We can't do a binary search because the table is in inner-most
3478 to outermost address ranges within functions */
3479 for ( ; tab
[pos
].start_region
!= (void *) -1; pos
++)
3481 if (tab
[pos
].start_region
<= pc
&& tab
[pos
].end_region
> pc
)
3483 if (tab
[pos
].match_info
)
3485 __eh_matcher matcher
= eh_info
->match_function
;
3486 /* match info but no matcher is NOT a match */
3489 void *ret
= (*matcher
)((void *) eh_info
,
3490 tab
[pos
].match_info
, table
);
3494 retval
= tab
[pos
].exception_handler
;
3503 retval
= tab
[pos
].exception_handler
;
3510 #endif /* DWARF2_UNWIND_INFO */
3511 #endif /* EH_TABLE_LOOKUP */
3513 #ifdef DWARF2_UNWIND_INFO
3514 /* Support code for exception handling using static unwind information. */
3518 /* This type is used in get_reg and put_reg to deal with ABIs where a void*
3519 is smaller than a word, such as the Irix 6 n32 ABI. We cast twice to
3520 avoid a warning about casting between int and pointer of different
3523 typedef int ptr_type
__attribute__ ((mode (pointer
)));
3525 #ifdef INCOMING_REGNO
3526 /* Is the saved value for register REG in frame UDATA stored in a register
3527 window in the previous frame? */
3529 /* ??? The Sparc INCOMING_REGNO references TARGET_FLAT. This allows us
3530 to use the macro here. One wonders, though, that perhaps TARGET_FLAT
3531 compiled functions won't work with the frame-unwind stuff here.
3532 Perhaps the entirety of in_reg_window should be conditional on having
3533 seen a DW_CFA_GNU_window_save? */
3534 #define target_flags 0
/* Return nonzero when the saved value of REG in frame UDATA lives in a
   register window of the previous frame (SPARC-style register windows).
   NOTE(review): this extraction is missing physical lines (return type,
   braces, an intermediate `return 0;' and the #else/#endif of the
   STACK_GROWS_DOWNWARD conditional) -- compare against upstream libgcc2.c
   before editing.  */
3537 in_reg_window (int reg
, frame_state
*udata
)
/* A register saved in a register maps through INCOMING_REGNO; it is in a
   window iff the mapping is the identity.  */
3539 if (udata
->saved
[reg
] == REG_SAVED_REG
)
3540 return INCOMING_REGNO (reg
) == reg
;
3541 if (udata
->saved
[reg
] != REG_SAVED_OFFSET
)
/* For REG_SAVED_OFFSET, the sign of the CFA offset distinguishes a save
   above vs. below the CFA, which depends on stack growth direction.  */
3544 #ifdef STACK_GROWS_DOWNWARD
3545 return udata
->reg_or_offset
[reg
] > 0;
3547 return udata
->reg_or_offset
[reg
] < 0;
/* Fallback definition when INCOMING_REGNO is not provided by the target:
   no register windows exist, so the answer is always "no".  Parameters are
   marked unused to keep -Wunused quiet.
   NOTE(review): the function body (`return 0;') is elided in this
   extraction.  */
3552 in_reg_window (int reg
__attribute__ ((__unused__
)),
3553 frame_state
*udata
__attribute__ ((__unused__
)))
3557 #endif /* INCOMING_REGNO */
3559 /* Get the address of register REG as saved in UDATA, where SUB_UDATA is a
3560 frame called by UDATA or 0. */
/* Return the address of the save slot for REG in frame UDATA; SUB_UDATA is
   the frame called by UDATA (or 0).  Follows REG_SAVED_REG chains until a
   REG_SAVED_OFFSET slot is found, then returns CFA + offset.
   NOTE(review): lines are elided here -- the register-window branch that
   redirects the lookup into SUB_UDATA, and the trailing abort() for an
   unsaved register, are missing from this extraction.  */
3563 get_reg_addr (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
/* Chase saved-in-register indirections to the ultimate save location.  */
3565 while (udata
->saved
[reg
] == REG_SAVED_REG
)
3567 reg
= udata
->reg_or_offset
[reg
];
3568 if (in_reg_window (reg
, udata
))
/* Saved at an offset from the frame's CFA.  */
3574 if (udata
->saved
[reg
] == REG_SAVED_OFFSET
)
3575 return (word_type
*)(udata
->cfa
+ udata
->reg_or_offset
[reg
]);
3580 /* Get the value of register REG as saved in UDATA, where SUB_UDATA is a
3581 frame called by UDATA or 0. */
/* Return the saved value of REG in frame UDATA as a void*.  The double
   cast through ptr_type avoids truncation/warnings on ABIs where void*
   is narrower than word_type (see the ptr_type comment above).  */
3583 static inline void *
3584 get_reg (unsigned reg
, frame_state
*udata
, frame_state
*sub_udata
)
3586 return (void *)(ptr_type
) *get_reg_addr (reg
, udata
, sub_udata
);
3589 /* Overwrite the saved value for register REG in frame UDATA with VAL. */
/* Overwrite the saved value for register REG in frame UDATA with VAL.
   The ptr_type cast mirrors get_reg's widening in the other direction.
   NOTE(review): the return type line is elided in this extraction.  */
3592 put_reg (unsigned reg
, void *val
, frame_state
*udata
)
3594 *get_reg_addr (reg
, udata
, NULL
) = (word_type
)(ptr_type
) val
;
3597 /* Copy the saved value for register REG from frame UDATA to frame
3598 TARGET_UDATA. Unlike the previous two functions, this can handle
3599 registers that are not one word large. */
/* Copy the saved value for REG from frame UDATA into frame TARGET_UDATA.
   Unlike get_reg/put_reg this uses memcpy with the per-register size from
   dwarf_reg_size_table, so it handles registers wider than one word.
   NOTE(review): return type line elided in this extraction.  */
3602 copy_reg (unsigned reg
, frame_state
*udata
, frame_state
*target_udata
)
3604 word_type
*preg
= get_reg_addr (reg
, udata
, NULL
);
3605 word_type
*ptreg
= get_reg_addr (reg
, target_udata
, NULL
);
3607 memcpy (ptreg
, preg
, dwarf_reg_size_table
[reg
]);
3610 /* Retrieve the return address for frame UDATA. */
/* Retrieve the return address saved in frame UDATA (SUB_UDATA is the
   frame called by UDATA, forwarded to get_reg for register-window
   handling).  __builtin_extract_return_addr undoes any target-specific
   encoding of the stored value.  */
3612 static inline void *
3613 get_return_addr (frame_state
*udata
, frame_state
*sub_udata
)
3615 return __builtin_extract_return_addr
3616 (get_reg (udata
->retaddr_column
, udata
, sub_udata
));
3619 /* Overwrite the return address for frame UDATA with VAL. */
/* Overwrite the return address for frame UDATA with VAL, applying the
   target's inverse encoding (__builtin_frob_return_addr) before storing
   it into the frame's return-address column.
   NOTE(review): the return type line is elided in this extraction.  */
3622 put_return_addr (void *val
, frame_state
*udata
)
3624 val
= __builtin_frob_return_addr (val
);
3625 put_reg (udata
->retaddr_column
, val
, udata
);
3628 /* Given the current frame UDATA and its return address PC, return the
3629 information about the calling frame in CALLER_UDATA. */
/* Given the current frame UDATA and its return address PC, fill
   CALLER_UDATA with the unwind state of the calling frame and return it
   (or, presumably, 0 when no FDE covers PC -- the early-exit line is
   elided in this extraction, along with the function's braces and the
   `else' of the CFA-restore conditional).  */
3632 next_stack_level (void *pc
, frame_state
*udata
, frame_state
*caller_udata
)
/* Look up the FDE-derived frame state for PC.  */
3634 caller_udata
= __frame_state_for (pc
, caller_udata
);
3638 /* Now go back to our caller's stack frame. If our caller's CFA register
3639 was saved in our stack frame, restore it; otherwise, assume the CFA
3640 register is SP and restore it to our CFA value. */
3641 if (udata
->saved
[caller_udata
->cfa_reg
])
3642 caller_udata
->cfa
= get_reg (caller_udata
->cfa_reg
, udata
, 0);
3644 caller_udata
->cfa
= udata
->cfa
;
/* Apply the FDE's constant offset to arrive at the caller's CFA.  */
3645 caller_udata
->cfa
+= caller_udata
->cfa_offset
;
3647 return caller_udata
;
3650 /* Hook to call before __terminate if only cleanup handlers remain. */
3652 __unwinding_cleanup (void)
3656 /* throw_helper performs some of the common grunt work for a throw. This
3657 routine is called by throw and rethrows. This is pretty much split
3658 out from the old __throw routine. An addition has been added which allows
3659 for a dummy call to a routine __unwinding_cleanup() when there are nothing
3660 but cleanups remaining. This allows a debugger to examine the state
3661 at which the throw was executed, before any cleanups, rather than
3662 at the terminate point after the stack has been unwound.
3664 EH is the current eh_context structure.
3665 PC is the address of the call to __throw.
3666 MY_UDATA is the unwind information for __throw.
3667 OFFSET_P is where we return the SP adjustment offset. */
/* Common grunt work for __throw and __rethrow: walk up the stack looking
   for an exception handler for EH, then unwind down to the handler's
   frame.  PC is the address of the call to __throw; MY_UDATA is __throw's
   own unwind state; *OFFSET_P receives the SP adjustment for
   __builtin_eh_return.  Returns the handler label (elided return
   statement).
   NOTE(review): this extraction drops many physical lines throughout
   (braces, else branches, declarations of `handler'/`cleanup'/`i'/
   `args_size', terminate/abort calls); treat the fragment as
   non-compilable reference text and diff against upstream libgcc2.c
   before changing anything.  */
3670 throw_helper (struct eh_context
*eh
, void *pc
, frame_state
*my_udata
,
3673 frame_state ustruct2
, *udata
= &ustruct2
;
3674 frame_state ustruct
;
3675 frame_state
*sub_udata
= &ustruct
;
3676 void *saved_pc
= pc
;
3678 void *handler_p
= 0;
3680 frame_state saved_ustruct
;
3683 int only_cleanup
= 0;
3685 int saved_state
= 0;
3687 __eh_info
*eh_info
= (__eh_info
*)eh
->info
;
3689 /* Do we find a handler based on a re-throw PC? */
3690 if (eh
->table_index
!= (void *) 0)
/* --- Search phase: start from __throw's own frame state.  */
3693 memcpy (udata
, my_udata
, sizeof (*udata
));
3695 handler
= (void *) 0;
3698 frame_state
*p
= udata
;
3699 udata
= next_stack_level (pc
, udata
, sub_udata
);
3702 /* If we couldn't find the next frame, we lose. */
3706 if (udata
->eh_ptr
== NULL
)
/* Distinguish new-model tables by the runtime id stamped in the
   exception_descriptor.  */
3709 new_eh_model
= (((exception_descriptor
*)(udata
->eh_ptr
))->
3710 runtime_id_field
== NEW_EH_RUNTIME
);
/* Rethrow path: search starting from the saved table index.  */
3715 handler
= find_exception_handler (eh
->table_index
, udata
->eh_ptr
,
3716 eh_info
, 1, &cleanup
);
3717 eh
->table_index
= (void *)0;
/* Normal path: new-model lookup, else the old linear table scan.  */
3721 handler
= find_exception_handler (pc
, udata
->eh_ptr
, eh_info
,
3724 handler
= old_find_exception_handler (pc
, udata
->eh_ptr
);
3726 /* If we found one, we can stop searching, if its not a cleanup.
3727 for cleanups, we save the state, and keep looking. This allows
3728 us to call a debug hook if there are nothing but cleanups left. */
3735 saved_ustruct
= *udata
;
3736 handler_p
= handler
;
3749 /* Otherwise, we continue searching. We subtract 1 from PC to avoid
3750 hitting the beginning of the next region. */
3751 pc
= get_return_addr (udata
, sub_udata
) - 1;
/* Only cleanups were found: restore the saved state and let a
   debugger observe the pre-unwind state via the hook.  */
3756 udata
= &saved_ustruct
;
3757 handler
= handler_p
;
3760 __unwinding_cleanup ();
3763 /* If we haven't found a handler by now, this is an unhandled
3768 eh
->handler_label
= handler
;
3770 args_size
= udata
->args_size
;
3773 /* We found a handler in the throw context, no need to unwind. */
3779 /* Unwind all the frames between this one and the handler by copying
3780 their saved register values into our register save slots. */
3782 /* Remember the PC where we found the handler. */
3783 void *handler_pc
= pc
;
3785 /* Start from the throw context again. */
3787 memcpy (udata
, my_udata
, sizeof (*udata
));
3789 while (pc
!= handler_pc
)
3791 frame_state
*p
= udata
;
3792 udata
= next_stack_level (pc
, udata
, sub_udata
);
/* Propagate every saved register of this frame into __throw's own
   save slots, skipping the return-address column.  */
3795 for (i
= 0; i
< DWARF_FRAME_REGISTERS
; ++i
)
3796 if (i
!= udata
->retaddr_column
&& udata
->saved
[i
])
3798 /* If you modify the saved value of the return address
3799 register on the SPARC, you modify the return address for
3800 your caller's frame. Don't do that here, as it will
3801 confuse get_return_addr. */
3802 if (in_reg_window (i
, udata
)
3803 && udata
->saved
[udata
->retaddr_column
] == REG_SAVED_REG
3804 && udata
->reg_or_offset
[udata
->retaddr_column
] == i
)
3806 copy_reg (i
, udata
, my_udata
);
3809 pc
= get_return_addr (udata
, sub_udata
) - 1;
3812 /* But we do need to update the saved return address register from
3813 the last frame we unwind, or the handler frame will have the wrong
3815 if (udata
->saved
[udata
->retaddr_column
] == REG_SAVED_REG
)
3817 i
= udata
->reg_or_offset
[udata
->retaddr_column
];
3818 if (in_reg_window (i
, udata
))
3819 copy_reg (i
, udata
, my_udata
);
3822 /* udata now refers to the frame called by the handler frame. */
3824 /* We adjust SP by the difference between __throw's CFA and the CFA for
3825 the frame called by the handler frame, because those CFAs correspond
3826 to the SP values at the two call sites. We need to further adjust by
3827 the args_size of the handler frame itself to get the handler frame's
3828 SP from before the args were pushed for that call. */
3829 #ifdef STACK_GROWS_DOWNWARD
3830 *offset_p
= udata
->cfa
- my_udata
->cfa
+ args_size
;
3832 *offset_p
= my_udata
->cfa
- udata
->cfa
- args_size
;
3839 /* We first search for an exception handler, and if we don't find
3840 it, we call __terminate on the current stack frame so that we may
3841 use the debugger to walk the stack and understand why no handler
3844 If we find one, then we unwind the frames down to the one that
3845 has the handler and transfer control into the handler. */
3847 /*extern void __throw(void) __attribute__ ((__noreturn__));*/
/* Body of __throw: capture our own unwind state, delegate the search and
   unwind to throw_helper, then transfer control to the handler with
   __builtin_eh_return.
   NOTE(review): the function header, local declarations of `pc'/`offset'/
   `handler', the rethrow-without-exception terminate check, and the
   trailing `label:' used by __frame_state_for are all elided in this
   extraction.  */
3852 struct eh_context
*eh
= (*get_eh_context
) ();
3856 /* XXX maybe make my_ustruct static so we don't have to look it up for
3858 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3860 /* This is required for C++ semantics. We must call terminate if we
3861 try and rethrow an exception, when there is no exception currently
3866 /* Start at our stack frame. */
3868 my_udata
= __frame_state_for (&&label
, my_udata
);
3872 /* We need to get the value from the CFA register. */
3873 my_udata
->cfa
= __builtin_dwarf_cfa ();
3875 /* Do any necessary initialization to access arbitrary stack frames.
3876 On the SPARC, this means flushing the register windows. */
3877 __builtin_unwind_init ();
3879 /* Now reset pc to the right throw point. */
3880 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3882 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
/* Jump into the handler frame: adjust SP by OFFSET and branch to
   HANDLER, passing EH as the handler's argument.  */
3886 __builtin_eh_return ((void *)eh
, offset
, handler
);
3888 /* Epilogue: restore the handler frame's register values and return
3892 /*extern void __rethrow(void *) __attribute__ ((__noreturn__));*/
/* Rethrow from the region-table entry INDEX: identical in shape to
   __throw, except that eh->table_index is primed so throw_helper resumes
   the handler search from the entry after the one we are rethrowing out
   of.
   NOTE(review): as with __throw, the return type, local declarations,
   the no-current-exception terminate check, and the trailing `label:'
   are elided in this extraction.  */
3895 __rethrow (void *index
)
3897 struct eh_context
*eh
= (*get_eh_context
) ();
3901 /* XXX maybe make my_ustruct static so we don't have to look it up for
3903 frame_state my_ustruct
, *my_udata
= &my_ustruct
;
3905 /* This is required for C++ semantics. We must call terminate if we
3906 try and rethrow an exception, when there is no exception currently
3911 /* This is the table index we want to rethrow from. The value of
3912 the END_REGION label is used for the PC of the throw, and the
3913 search begins with the next table entry. */
3914 eh
->table_index
= index
;
3916 /* Start at our stack frame. */
3918 my_udata
= __frame_state_for (&&label
, my_udata
);
3922 /* We need to get the value from the CFA register. */
3923 my_udata
->cfa
= __builtin_dwarf_cfa ();
3925 /* Do any necessary initialization to access arbitrary stack frames.
3926 On the SPARC, this means flushing the register windows. */
3927 __builtin_unwind_init ();
3929 /* Now reset pc to the right throw point. */
3930 pc
= __builtin_extract_return_addr (__builtin_return_address (0)) - 1;
3932 handler
= throw_helper (eh
, pc
, my_udata
, &offset
);
/* Transfer control into the located handler frame.  */
3936 __builtin_eh_return ((void *)eh
, offset
, handler
);
3938 /* Epilogue: restore the handler frame's register values and return
3941 #endif /* DWARF2_UNWIND_INFO */
3943 #ifdef IA64_UNWIND_INFO
3946 /* Return handler to which we want to transfer control, NULL if we don't
3947 intend to handle this exception here. */
/* IA-64 personality routine, old-table flavor: linearly scan TABLE for
   the innermost region containing PC and return that region's handler, or
   (presumably) NULL when none matches -- the declarations of `pos'/`best',
   the body braces, and the no-match return are elided in this extraction.
   PC is segment-relative (see the caller in ia64_throw_helper).  */
3949 __ia64_personality_v1 (void *pc
, old_exception_table
*table
)
/* Table is terminated by a start_region of (void *) -1.  */
3956 for (pos
= 0; table
[pos
].start_region
!= (void *) -1; ++pos
)
3958 if (table
[pos
].start_region
<= pc
&& table
[pos
].end_region
> pc
)
3960 /* This can apply. Make sure it is at least as small as
3961 the previous best. */
3962 if (best
== -1 || (table
[pos
].end_region
<= table
[best
].end_region
3963 && table
[pos
].start_region
>= table
[best
].start_region
))
3966 /* It is sorted by starting PC within a function. */
3967 else if (best
>= 0 && table
[pos
].start_region
> pc
)
3971 return table
[best
].exception_handler
;
/* IA-64 analogue of throw_helper: walk back through frames (rp/pfs/bsp
   bookkeeping instead of DWARF CFA), ask the personality routine for a
   handler, then copy saved register state into THROW_FRAME and point its
   return address at the handler.  Written in K&R parameter style.
   NOTE(review): this extraction elides the return type, the `throw_pc'/
   `throw_bsp' parameter declarations, declarations of `bsp'/`pc_base'/
   `eh_table'/`frame_count', the search loop's braces/termination, and the
   uncaught()/abort paths; diff against upstream before editing.  */
3977 ia64_throw_helper (throw_pc
, throw_frame
, caller
, throw_bsp
)
3979 ia64_frame_state
*throw_frame
;
3980 ia64_frame_state
*caller
;
3983 unwind_info_ptr
*info
;
3984 void *pc
, *handler
= NULL
;
3989 __builtin_ia64_flushrs (); /* Make the local register stacks available. */
3991 /* Start at our stack frame, get our state. */
3992 __build_ia64_frame_state (throw_pc
, throw_frame
, throw_bsp
, &pc_base
);
3994 /* Now we have to find the proper frame for pc, and see if there
3995 is a handler for it. If not, we keep going back frames until
3996 we do find one. Otherwise we call uncaught (). */
3999 memcpy (caller
, throw_frame
, sizeof (*caller
));
4002 void *(*personality
) ();
4006 /* We only care about the RP right now, so we don't need to keep
4007 any other information about a call frame right now. */
4008 pc
= __get_real_reg_value (&caller
->rp
) - 1;
4009 bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&caller
->pfs
), caller
->my_bsp
);
4010 info
= __build_ia64_frame_state (pc
, caller
, bsp
, &pc_base
);
4012 /* If we couldn't find the next frame, we lose. */
4016 personality
= __get_personality (info
);
4017 /* TODO Haven't figured out how to actually load the personality address
4018 yet, so just always default to the one we expect for now. */
4019 if (personality
!= 0)
4020 personality
= __ia64_personality_v1
;
4021 eh_table
= __get_except_table (info
);
4022 /* If there is no personality routine, we'll keep unwinding. */
4024 /* Pass a segment relative PC address to the personality routine,
4025 because the unwind_info section uses segrel relocs. */
4026 handler
= personality (pc
- pc_base
, eh_table
);
4032 /* Handler is a segment relative address, so we must adjust it here. */
4033 handler
+= (long) pc_base
;
4035 /* If we found a handler, we need to unwind the stack to that point.
4036 We do this by copying saved values from previous frames into the
4037 save slot for the throw_frame saved slots. when __throw returns,
4038 it'll pickup the correct values. */
4040 /* Start with where __throw saved things, and copy each saved register
4041 of each previous frame until we get to the one before we're
4042 throwing back to. */
4043 memcpy (caller
, throw_frame
, sizeof (*caller
));
4044 for ( ; frame_count
> 0; frame_count
--)
4046 pc
= __get_real_reg_value (&caller
->rp
) - 1;
4047 bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&caller
->pfs
), caller
->my_bsp
);
4048 __build_ia64_frame_state (pc
, caller
, bsp
, &pc_base
);
4049 /* Any regs that were saved can be put in the throw frame now. */
4050 /* We don't want to copy any saved register from the
4051 target destination, but we do want to load up its frame. */
4052 if (frame_count
> 1)
4053 __copy_saved_reg_state (throw_frame
, caller
);
4056 /* Set return address of the throw frame to the handler. */
4057 __set_real_reg_value (&throw_frame
->rp
, handler
);
4059 /* TODO, do we need to do anything to make the values we wrote 'stick'? */
4060 /* Do we need to go through the whole loadrs sequence? */
/* Body of the IA-64 __throw: capture frame state via ia64_throw_helper,
   then compute the backing-store (bsp) and SP adjustments and transfer
   control with __builtin_eh_return.
   NOTE(review): the function header, the declaration of `offset', the
   terminate check, the second argument of the first __calc_caller_bsp
   call, and the trailing `label_ia64:' are elided in this extraction.  */
4067 struct eh_context
*eh
= (*get_eh_context
) ();
4068 ia64_frame_state my_frame
;
4069 ia64_frame_state originator
; /* For the context handler is in. */
4070 void *bsp
, *tmp_bsp
;
4073 /* This is required for C++ semantics. We must call terminate if we
4074 try and rethrow an exception, when there is no exception currently
4079 __builtin_unwind_init ();
4081 /* We have to call another routine to actually process the frame
4082 information, which will force all of __throw's local registers into
4085 /* Get the value of ar.bsp while we're here. */
4087 bsp
= __builtin_ia64_bsp ();
4088 ia64_throw_helper (&&label_ia64
, &my_frame
, &originator
, bsp
);
4090 /* Now we have to fudge the bsp by the amount in our (__throw)
4091 frame marker, since the return is going to adjust it by that much. */
4093 tmp_bsp
= __calc_caller_bsp ((long)__get_real_reg_value (&my_frame
.pfs
),
4095 offset
= (char *)my_frame
.my_bsp
- (char *)tmp_bsp
;
4096 tmp_bsp
= (char *)originator
.my_bsp
+ offset
;
4098 /* A throw handler is treated like a non-local goto, which is architected
4099 to set the FP (or PSP) in r7 before branching. gr[0-3] map to
4100 r4-r7, so we want gr[3]. */
4101 __set_real_reg_value (&my_frame
.gr
[3], __get_real_reg_value (&originator
.psp
));
4103 __builtin_eh_return (tmp_bsp
, offset
, originator
.my_sp
);
4105 /* The return address was already set by throw_helper. */
4108 #endif /* IA64_UNWIND_INFO */
4113 #ifndef inhibit_libc
4114 /* This gets us __GNU_LIBRARY__. */
4115 #undef NULL /* Avoid errors if stdio.h and our stddef.h mismatch. */
4118 #ifdef __GNU_LIBRARY__
4119 /* Avoid forcing the library's meaning of `write' on the user program
4120 by using the "internal" name (for use within the library) */
4121 #define write(fd, buf, n) __write((fd), (buf), (n))
4123 #endif /* inhibit_libc */
4125 #define MESSAGE "pure virtual method called\n"
4128 __pure_virtual (void)
4130 #ifndef inhibit_libc
4131 write (2, MESSAGE
, sizeof (MESSAGE
) - 1);