/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
   2009  Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif
/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
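/* Illustrative note (added commentary, not part of the original source):
   besides crtbegin.o, a JIT that emits its own CFI can use these entry
   points to make it visible to the unwinder.  A minimal sketch, assuming
   eh_frame points at the generated .eh_frame data, terminated by a zero
   length word and kept alive until deregistration:

       __register_frame (eh_frame);
       ... generated code may now be unwound through ...
       __deregister_frame (eh_frame);

   __register_frame mallocs the struct object itself; the *_bases variants
   let the caller supply that storage plus the text/data base addresses
   used by text- and data-relative pointer encodings.  */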
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */
void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
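/* Example (added commentary, not from the original source): with the bases
   chosen above, a pointer encoded as DW_EH_PE_datarel | DW_EH_PE_sdata4
   decodes to ob->dbase plus a signed 32-bit offset, a DW_EH_PE_textrel
   value is an offset from ob->tbase, and DW_EH_PE_pcrel values need no
   base at all because they are decoded relative to their own location.  */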
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);              /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);              /* Skip data alignment.  */
  if (cie->version == 1)                    /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);
  aug++;                                    /* Skip 'z' */
  p = read_uleb128 (p, &utmp);              /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}

static int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  const _Unwind_Ptr x_ptr = *(const _Unwind_Ptr *) x->pc_begin;
  const _Unwind_Ptr y_ptr = *(const _Unwind_Ptr *) y->pc_begin;

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
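/* Worked illustration (added here, not from the original comment): for an
   input such as 1 2 9 3 4 8 5, fde_split below keeps the in-order chain
   1 2 3 4 5 in the LINEAR vector and moves the out-of-place entries 9 and 8
   to the ERRATIC vector; frame_heapsort then sorts the erratic entries to
   8 9, and fde_merge merges the two vectors back into 1 2 3 4 5 8 9.  */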
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;

  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const*) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
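  /* For instance (illustrative, not from the original comment), the array
     9 7 8 3 5 6 satisfies this: 9 >= 7, 9 >= 8, 7 >= 3, 7 >= 5, 8 >= 6.  */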
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */

static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          if (*(const _Unwind_Ptr *) this_fde->pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          pc_begin = ((const _Unwind_Ptr *) this_fde->pc_begin)[0];
          pc_range = ((const _Unwind_Ptr *) this_fde->pc_begin)[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      const void *pc_begin = ((const void *const*) f->pc_begin)[0];
      const uaddr pc_range = ((const uaddr *) f->pc_begin)[1];

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}