/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

   This file is part of GNU CC.

   GNU CC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   In addition to the permissions in the GNU General Public License, the
   Free Software Foundation gives you unlimited permission to link the
   compiled version of this file into combinations with other programs,
   and to distribute those combinations without any restriction coming
   from the use of this file.  (The General Public License restrictions
   do apply in other respects; for example, they cover modification of
   the file, and distribution when not linked into a combined
   executable.)

   GNU CC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GNU CC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#ifdef _LIBC
# include <shlib-compat.h>
#endif

#if !defined _LIBC || SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2_5)

#ifdef _LIBC
#include <stdlib.h>
#include <string.h>
#include <bits/libc-lock.h>
#define NO_BASE_OF_ENCODED_VALUE
#include <unwind-pe.h>
#include <unwind-dw2-fde.h>
#else
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
#ifdef _LIBC

__libc_lock_define_initialized (static, object_mutex)
#define init_object_mutex_once()
#define __gthread_mutex_lock(m) __libc_lock_lock (*(m))
#define __gthread_mutex_unlock(m) __libc_lock_unlock (*(m))

void __register_frame_info_bases_internal (void *begin, struct object *ob,
                                            void *tbase, void *dbase);
void __register_frame_info_table_bases_internal (void *begin,
                                                  struct object *ob,
                                                  void *tbase, void *dbase);
void *__deregister_frame_info_bases_internal (void *begin);
#else

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif

#endif
/* Called from crtbegin.o to register the unwind info for an object.  */
void
__register_frame_info_bases (void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
INTDEF(__register_frame_info_bases)
void
__register_frame_info (void *begin, struct object *ob)
{
  INTUSE(__register_frame_info_bases) (begin, ob, 0, 0);
}
void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = (struct object *) malloc (sizeof (struct object));
  INTUSE(__register_frame_info_bases) (begin, ob, 0, 0);
}
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
INTDEF(__register_frame_info_table_bases)
void
__register_frame_info_table (void *begin, struct object *ob)
{
  INTUSE(__register_frame_info_table_bases) (begin, ob, 0, 0);
}
void
__register_frame_table (void *begin)
{
  struct object *ob = (struct object *) malloc (sizeof (struct object));
  INTUSE(__register_frame_info_table_bases) (begin, ob, 0, 0);
}
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll abort.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */
void *
__deregister_frame_info_bases (void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

  __gthread_mutex_unlock (&object_mutex);
  abort ();

 out:
  __gthread_mutex_unlock (&object_mutex);
  return (void *) ob;
}
INTDEF(__deregister_frame_info_bases)
void *
__deregister_frame_info (void *begin)
{
  return INTUSE(__deregister_frame_info_bases) (begin);
}
void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (INTUSE(__deregister_frame_info_bases) (begin));
}
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */
static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;

    default:
      abort ();
    }
}
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */
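/* As a purely illustrative example: for a CIE whose augmentation string
   is just "zR", the code below skips the string, the code and data
   alignment factors, the return address column and the augmentation
   length, so on reaching 'R' it returns the single augmentation data
   byte, i.e. the FDE pointer encoding.  A string like "zPLR" works the
   same way once the 'P' and 'L' entries have been stepped over.  */
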
static int
get_cie_encoding (struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen (aug) + 1;		/* Skip the augmentation string.  */
  p = read_uleb128 (p, &dummy);		/* Skip code alignment.  */
  p = read_sleb128 (p, &dummy);		/* Skip data alignment.  */
  p++;					/* Skip return address column.  */

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &dummy);		/* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}
static int
get_fde_encoding (struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */
static saddr
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       fde *x, fde *y)
{
  return *(saddr *) x->pc_begin - *(saddr *) y->pc_begin;
}
static saddr
fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  return x_ptr - y_ptr;
}
static saddr
fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  return x_ptr - y_ptr;
}
typedef saddr (*fde_compare_t) (struct object *, fde *, fde *);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
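/* Illustrative walk-through of the sample sequence above: fde_split keeps
   the ascending run 101 102 103 105 108 110 111 115 119 125 126 129 130
   in LINEAR and moves the out-of-place entries 127 128 190 160 to ERRATIC;
   frame_heapsort then sorts the erratic entries to 127 128 160 190, and
   fde_merge folds them back in, so only the small erratic part pays the
   O(n log n) cost.  */
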
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (fde *) * count;
  if ((accu->linear = (struct fde_vector *) malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = (struct fde_vector *) malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}
static inline void
fde_insert (struct fde_accumulator *accu, fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
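/* A small worked example of the overlay (hypothetical 4-entry input with
   pc values 10 30 20 40): after the first pass, the ERRATIC slots for
   10, 20 and 40 hold chain pointers (non-NULL) while the slot for 30 was
   cleared to NULL when 20 displaced it from the chain, so the second pass
   leaves 10 20 40 in LINEAR and moves 30 to ERRATIC.  */
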
static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static fde *marker;
  size_t count = linear->count;
  fde **chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  if (sizeof (fde *) != sizeof (fde **))
    abort ();

  for (i = 0; i < count; i++)
    {
      fde **probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (fde **) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
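  /* For instance, the hypothetical array 9 7 8 3 1 4 satisfies this:
     a[0]=9 >= a[1]=7 and a[2]=8; a[1]=7 >= a[3]=3 and a[4]=1; a[2]=8 >=
     a[5]=4.  The first loop below establishes the property over the whole
     array; the second repeatedly moves the maximum a[0] to the end and
     restores the property on the shrinking prefix.  */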
#define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
  size_t n = erratic->count;
  size_t m = n;
  size_t i;

  while (m > 0)
    {
      /* Invariant: a[m..n-1] is a heap.  */
      m--;
      for (i = m; 2*i+1 < n; )
        {
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+2]);
              i = 2*i+2;
            }
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+1]);
              i = 2*i+1;
            }
          else
            break;
        }
    }
  while (n > 1)
    {
      /* Invariant: a[0..n-1] is a heap.  */
      n--;
      SWAP (a[0], a[n]);
      for (i = 0; 2*i+1 < n; )
        {
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+2]);
              i = 2*i+2;
            }
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+1]);
              i = 2*i+1;
            }
          else
            break;
        }
    }
#undef SWAP
}
/* Merge V1 and V2, both sorted, and put the result into V1.  */

static void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  fde *fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}
static void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  if (accu->linear && accu->linear->count != count)
    abort ();

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      if (accu->linear->count + accu->erratic->count != count)
        abort ();
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; this_fde->length != 0; this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      else
        mask = -1;
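
      /* E.g. for a 2-byte encoding, mask becomes (1L << 16) - 1 = 0xffff,
         so only the low 16 bits of pc_begin are tested against zero; for a
         full-size pointer encoding the mask stays all ones and this is an
         ordinary NULL check.  (Illustrative values only.)  */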

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
static void
add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; this_fde->length != 0; this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          if (*(_Unwind_Ptr *) this_fde->pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */
static void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
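      /* For example, if the bit-field were 16 bits wide (width here is
         illustrative only) and COUNT were 70000, the stored value would
         wrap, the read-back below would differ from COUNT, and the field
         would be reset to zero so the FDEs are simply re-counted on the
         next call.  */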
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static fde *
linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; this_fde->length != 0; this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          pc_begin = ((_Unwind_Ptr *) this_fde->pc_begin)[0];
          pc_range = ((_Unwind_Ptr *) this_fde->pc_begin)[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */
static fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;

      pc_begin = ((void **) f->pc_begin)[0];
      pc_range = ((uaddr *) f->pc_begin)[1];

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, because we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}
fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
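  /* Illustrative example: with seen_objects holding pc_begin values
     0x40000, 0x30000, 0x10000 (descending), a lookup for pc 0x35000 stops
     at the 0x30000 object, the first one whose pc_begin is not above pc.  */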
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        goto fini;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, (_Unwind_Ptr *) &bases->func);
    }

  return f;
}

#endif