1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
3 2009 Free Software Foundation, Inc.
4 Contributed by Jason Merrill <jason@cygnus.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 Under Section 7 of GPL version 3, you are granted additional
19 permissions described in the GCC Runtime Library Exception, version
20 3.1, as published by the Free Software Foundation.
22 You should have received a copy of the GNU General Public License and
23 a copy of the GCC Runtime Library Exception along with this program;
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
25 <http://www.gnu.org/licenses/>. */
27 #ifndef _Unwind_Find_FDE
30 #include "coretypes.h"
34 #define NO_BASE_OF_ENCODED_VALUE
35 #include "unwind-pe.h"
36 #include "unwind-dw2-fde.h"
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
47 #ifdef __GTHREAD_MUTEX_INIT
48 static __gthread_mutex_t object_mutex
= __GTHREAD_MUTEX_INIT
;
50 static __gthread_mutex_t object_mutex
;
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
/* Initialize OBJECT_MUTEX via the target's init function.  */
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

/* Run init_object_mutex exactly once, even with multiple threads racing.  */
static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* The mutex needs no runtime initialization.  */
#define init_object_mutex_once()
#endif
70 /* Called from crtbegin.o to register the unwind info for an object. */
73 __register_frame_info_bases (const void *begin
, struct object
*ob
,
74 void *tbase
, void *dbase
)
76 /* If .eh_frame is empty, don't register at all. */
77 if ((const uword
*) begin
== 0 || *(const uword
*) begin
== 0)
80 ob
->pc_begin
= (void *)-1;
85 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
86 #ifdef DWARF2_OBJECT_END_PTR_EXTENSION
90 init_object_mutex_once ();
91 __gthread_mutex_lock (&object_mutex
);
93 ob
->next
= unseen_objects
;
96 __gthread_mutex_unlock (&object_mutex
);
/* Register BEGIN's unwind info with no text/data segment bases.  */
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
106 __register_frame (void *begin
)
110 /* If .eh_frame is empty, don't register at all. */
111 if (*(uword
*) begin
== 0)
114 ob
= malloc (sizeof (struct object
));
115 __register_frame_info (begin
, ob
);
118 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
119 for different translation units. Called from the file generated by
123 __register_frame_info_table_bases (void *begin
, struct object
*ob
,
124 void *tbase
, void *dbase
)
126 ob
->pc_begin
= (void *)-1;
131 ob
->s
.b
.from_array
= 1;
132 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
134 init_object_mutex_once ();
135 __gthread_mutex_lock (&object_mutex
);
137 ob
->next
= unseen_objects
;
140 __gthread_mutex_unlock (&object_mutex
);
/* Register a table of unwind-entry pointers with no segment bases.  */
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
150 __register_frame_table (void *begin
)
152 struct object
*ob
= malloc (sizeof (struct object
));
153 __register_frame_info_table (begin
, ob
);
156 /* Called from crtbegin.o to deregister the unwind info for an object. */
157 /* ??? Glibc has for a while now exported __register_frame_info and
158 __deregister_frame_info. If we call __register_frame_info_bases
159 from crtbegin (wherein it is declared weak), and this object does
160 not get pulled from libgcc.a for other reasons, then the
161 invocation of __deregister_frame_info will be resolved from glibc.
162 Since the registration did not happen there, we'll die.
164 Therefore, declare a new deregistration entry point that does the
165 exact same thing, but will resolve to the same library as
166 implements __register_frame_info_bases. */
169 __deregister_frame_info_bases (const void *begin
)
172 struct object
*ob
= 0;
174 /* If .eh_frame is empty, we haven't registered. */
175 if ((const uword
*) begin
== 0 || *(const uword
*) begin
== 0)
178 init_object_mutex_once ();
179 __gthread_mutex_lock (&object_mutex
);
181 for (p
= &unseen_objects
; *p
; p
= &(*p
)->next
)
182 if ((*p
)->u
.single
== begin
)
189 for (p
= &seen_objects
; *p
; p
= &(*p
)->next
)
190 if ((*p
)->s
.b
.sorted
)
192 if ((*p
)->u
.sort
->orig_data
== begin
)
202 if ((*p
)->u
.single
== begin
)
211 __gthread_mutex_unlock (&object_mutex
);
/* Compatibility entry point; see the comment above
   __deregister_frame_info_bases for why this alias exists.  */
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
223 __deregister_frame (void *begin
)
225 /* If .eh_frame is empty, we haven't registered. */
226 if (*(uword
*) begin
!= 0)
227 free (__deregister_frame_info (begin
));
231 /* Like base_of_encoded_value, but take the base from a struct object
232 instead of an _Unwind_Context. */
235 base_from_object (unsigned char encoding
, struct object
*ob
)
237 if (encoding
== DW_EH_PE_omit
)
240 switch (encoding
& 0x70)
242 case DW_EH_PE_absptr
:
244 case DW_EH_PE_aligned
:
247 case DW_EH_PE_textrel
:
248 return (_Unwind_Ptr
) ob
->tbase
;
249 case DW_EH_PE_datarel
:
250 return (_Unwind_Ptr
) ob
->dbase
;
256 /* Return the FDE pointer encoding from the CIE. */
257 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
260 get_cie_encoding (const struct dwarf_cie
*cie
)
262 const unsigned char *aug
, *p
;
267 aug
= cie
->augmentation
;
269 return DW_EH_PE_absptr
;
271 p
= aug
+ strlen ((const char *)aug
) + 1; /* Skip the augmentation string. */
272 p
= read_uleb128 (p
, &utmp
); /* Skip code alignment. */
273 p
= read_sleb128 (p
, &stmp
); /* Skip data alignment. */
274 if (cie
->version
== 1) /* Skip return address column. */
277 p
= read_uleb128 (p
, &utmp
);
279 aug
++; /* Skip 'z' */
280 p
= read_uleb128 (p
, &utmp
); /* Skip augmentation length. */
283 /* This is what we're looking for. */
286 /* Personality encoding and pointer. */
287 else if (*aug
== 'P')
289 /* ??? Avoid dereferencing indirect pointers, since we're
290 faking the base address. Gotta keep DW_EH_PE_aligned
292 p
= read_encoded_value_with_base (*p
& 0x7F, 0, p
+ 1, &dummy
);
295 else if (*aug
== 'L')
297 /* Otherwise end of string, or unknown augmentation. */
299 return DW_EH_PE_absptr
;
/* Return the pointer encoding used by FDE F, taken from its CIE.  */
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
311 /* Sorting an array of FDEs by address.
312 (Ideally we would have the linker sort the FDEs so we don't have to do
313 it at run time. But the linkers are not yet prepared for this.) */
315 /* Comparison routines. Three variants of increasing complexity. */
318 fde_unencoded_compare (struct object
*ob
__attribute__((unused
)),
319 const fde
*x
, const fde
*y
)
321 _Unwind_Ptr x_ptr
, y_ptr
;
322 memcpy (&x_ptr
, x
->pc_begin
, sizeof (_Unwind_Ptr
));
323 memcpy (&y_ptr
, y
->pc_begin
, sizeof (_Unwind_Ptr
));
333 fde_single_encoding_compare (struct object
*ob
, const fde
*x
, const fde
*y
)
335 _Unwind_Ptr base
, x_ptr
, y_ptr
;
337 base
= base_from_object (ob
->s
.b
.encoding
, ob
);
338 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, x
->pc_begin
, &x_ptr
);
339 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, y
->pc_begin
, &y_ptr
);
349 fde_mixed_encoding_compare (struct object
*ob
, const fde
*x
, const fde
*y
)
351 int x_encoding
, y_encoding
;
352 _Unwind_Ptr x_ptr
, y_ptr
;
354 x_encoding
= get_fde_encoding (x
);
355 read_encoded_value_with_base (x_encoding
, base_from_object (x_encoding
, ob
),
356 x
->pc_begin
, &x_ptr
);
358 y_encoding
= get_fde_encoding (y
);
359 read_encoded_value_with_base (y_encoding
, base_from_object (y_encoding
, ob
),
360 y
->pc_begin
, &y_ptr
);
369 typedef int (*fde_compare_t
) (struct object
*, const fde
*, const fde
*);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
390 start_fde_sort (struct fde_accumulator
*accu
, size_t count
)
396 size
= sizeof (struct fde_vector
) + sizeof (const fde
*) * count
;
397 if ((accu
->linear
= malloc (size
)))
399 accu
->linear
->count
= 0;
400 if ((accu
->erratic
= malloc (size
)))
401 accu
->erratic
->count
= 0;
409 fde_insert (struct fde_accumulator
*accu
, const fde
*this_fde
)
412 accu
->linear
->array
[accu
->linear
->count
++] = this_fde
;
415 /* Split LINEAR into a linear sequence with low values and an erratic
416 sequence with high values, put the linear one (of longest possible
417 length) into LINEAR and the erratic one into ERRATIC. This is O(N).
419 Because the longest linear sequence we are trying to locate within the
420 incoming LINEAR array can be interspersed with (high valued) erratic
421 entries. We construct a chain indicating the sequenced entries.
422 To avoid having to allocate this chain, we overlay it onto the space of
423 the ERRATIC array during construction. A final pass iterates over the
424 chain to determine what should be placed in the ERRATIC array, and
425 what is the linear sequence. This overlay is safe from aliasing. */
428 fde_split (struct object
*ob
, fde_compare_t fde_compare
,
429 struct fde_vector
*linear
, struct fde_vector
*erratic
)
431 static const fde
*marker
;
432 size_t count
= linear
->count
;
433 const fde
*const *chain_end
= &marker
;
436 /* This should optimize out, but it is wise to make sure this assumption
437 is correct. Should these have different sizes, we cannot cast between
438 them and the overlaying onto ERRATIC will not work. */
439 gcc_assert (sizeof (const fde
*) == sizeof (const fde
**));
441 for (i
= 0; i
< count
; i
++)
443 const fde
*const *probe
;
445 for (probe
= chain_end
;
446 probe
!= &marker
&& fde_compare (ob
, linear
->array
[i
], *probe
) < 0;
449 chain_end
= (const fde
*const*) erratic
->array
[probe
- linear
->array
];
450 erratic
->array
[probe
- linear
->array
] = NULL
;
452 erratic
->array
[i
] = (const fde
*) chain_end
;
453 chain_end
= &linear
->array
[i
];
456 /* Each entry in LINEAR which is part of the linear sequence we have
457 discovered will correspond to a non-NULL entry in the chain we built in
458 the ERRATIC array. */
459 for (i
= j
= k
= 0; i
< count
; i
++)
460 if (erratic
->array
[i
])
461 linear
->array
[j
++] = linear
->array
[i
];
463 erratic
->array
[k
++] = linear
->array
[i
];
468 #define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)
470 /* Convert a semi-heap to a heap. A semi-heap is a heap except possibly
471 for the first (root) node; push it down to its rightful place. */
474 frame_downheap (struct object
*ob
, fde_compare_t fde_compare
, const fde
**a
,
479 for (i
= lo
, j
= 2*i
+1;
483 if (j
+1 < hi
&& fde_compare (ob
, a
[j
], a
[j
+1]) < 0)
486 if (fde_compare (ob
, a
[i
], a
[j
]) < 0)
496 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
497 use a name that does not conflict. */
500 frame_heapsort (struct object
*ob
, fde_compare_t fde_compare
,
501 struct fde_vector
*erratic
)
503 /* For a description of this algorithm, see:
504 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
506 const fde
** a
= erratic
->array
;
507 /* A portion of the array is called a "heap" if for all i>=0:
508 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
509 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
510 size_t n
= erratic
->count
;
513 /* Expand our heap incrementally from the end of the array, heapifying
514 each resulting semi-heap as we go. After each step, a[m] is the top
516 for (m
= n
/2-1; m
>= 0; --m
)
517 frame_downheap (ob
, fde_compare
, a
, m
, n
);
519 /* Shrink our heap incrementally from the end of the array, first
520 swapping out the largest element a[0] and then re-heapifying the
521 resulting semi-heap. After each step, a[0..m) is a heap. */
522 for (m
= n
-1; m
>= 1; --m
)
525 frame_downheap (ob
, fde_compare
, a
, 0, m
);
530 /* Merge V1 and V2, both sorted, and put the result into V1. */
532 fde_merge (struct object
*ob
, fde_compare_t fde_compare
,
533 struct fde_vector
*v1
, struct fde_vector
*v2
)
545 fde2
= v2
->array
[i2
];
546 while (i1
> 0 && fde_compare (ob
, v1
->array
[i1
-1], fde2
) > 0)
548 v1
->array
[i1
+i2
] = v1
->array
[i1
-1];
551 v1
->array
[i1
+i2
] = fde2
;
554 v1
->count
+= v2
->count
;
559 end_fde_sort (struct object
*ob
, struct fde_accumulator
*accu
, size_t count
)
561 fde_compare_t fde_compare
;
563 gcc_assert (!accu
->linear
|| accu
->linear
->count
== count
);
565 if (ob
->s
.b
.mixed_encoding
)
566 fde_compare
= fde_mixed_encoding_compare
;
567 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
568 fde_compare
= fde_unencoded_compare
;
570 fde_compare
= fde_single_encoding_compare
;
574 fde_split (ob
, fde_compare
, accu
->linear
, accu
->erratic
);
575 gcc_assert (accu
->linear
->count
+ accu
->erratic
->count
== count
);
576 frame_heapsort (ob
, fde_compare
, accu
->erratic
);
577 fde_merge (ob
, fde_compare
, accu
->linear
, accu
->erratic
);
578 free (accu
->erratic
);
582 /* We've not managed to malloc an erratic array,
583 so heap sort in the linear one. */
584 frame_heapsort (ob
, fde_compare
, accu
->linear
);
589 /* Update encoding, mixed_encoding, and pc_begin for OB for the
590 fde array beginning at THIS_FDE. Return the number of fdes
591 encountered along the way. */
594 classify_object_over_fdes (struct object
*ob
, const fde
*this_fde
)
596 const struct dwarf_cie
*last_cie
= 0;
598 int encoding
= DW_EH_PE_absptr
;
599 _Unwind_Ptr base
= 0;
601 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
603 const struct dwarf_cie
*this_cie
;
604 _Unwind_Ptr mask
, pc_begin
;
607 if (this_fde
->CIE_delta
== 0)
610 /* Determine the encoding for this FDE. Note mixed encoded
611 objects for later. */
612 this_cie
= get_cie (this_fde
);
613 if (this_cie
!= last_cie
)
616 encoding
= get_cie_encoding (this_cie
);
617 base
= base_from_object (encoding
, ob
);
618 if (ob
->s
.b
.encoding
== DW_EH_PE_omit
)
619 ob
->s
.b
.encoding
= encoding
;
620 else if (ob
->s
.b
.encoding
!= encoding
)
621 ob
->s
.b
.mixed_encoding
= 1;
624 read_encoded_value_with_base (encoding
, base
, this_fde
->pc_begin
,
627 /* Take care to ignore link-once functions that were removed.
628 In these cases, the function address will be NULL, but if
629 the encoding is smaller than a pointer a true NULL may not
630 be representable. Assume 0 in the representable bits is NULL. */
631 mask
= size_of_encoded_value (encoding
);
632 if (mask
< sizeof (void *))
633 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
637 if ((pc_begin
& mask
) == 0)
641 if ((void *) pc_begin
< ob
->pc_begin
)
642 ob
->pc_begin
= (void *) pc_begin
;
649 add_fdes (struct object
*ob
, struct fde_accumulator
*accu
, const fde
*this_fde
)
651 const struct dwarf_cie
*last_cie
= 0;
652 int encoding
= ob
->s
.b
.encoding
;
653 _Unwind_Ptr base
= base_from_object (ob
->s
.b
.encoding
, ob
);
655 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
657 const struct dwarf_cie
*this_cie
;
660 if (this_fde
->CIE_delta
== 0)
663 if (ob
->s
.b
.mixed_encoding
)
665 /* Determine the encoding for this FDE. Note mixed encoded
666 objects for later. */
667 this_cie
= get_cie (this_fde
);
668 if (this_cie
!= last_cie
)
671 encoding
= get_cie_encoding (this_cie
);
672 base
= base_from_object (encoding
, ob
);
676 if (encoding
== DW_EH_PE_absptr
)
679 memcpy (&ptr
, this_fde
->pc_begin
, sizeof (_Unwind_Ptr
));
685 _Unwind_Ptr pc_begin
, mask
;
687 read_encoded_value_with_base (encoding
, base
, this_fde
->pc_begin
,
690 /* Take care to ignore link-once functions that were removed.
691 In these cases, the function address will be NULL, but if
692 the encoding is smaller than a pointer a true NULL may not
693 be representable. Assume 0 in the representable bits is NULL. */
694 mask
= size_of_encoded_value (encoding
);
695 if (mask
< sizeof (void *))
696 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
700 if ((pc_begin
& mask
) == 0)
704 fde_insert (accu
, this_fde
);
708 /* Set up a sorted array of pointers to FDEs for a loaded object. We
709 count up the entries before allocating the array because it's likely to
710 be faster. We can be called multiple times, should we have failed to
711 allocate a sorted fde array on a previous occasion. */
714 init_object (struct object
* ob
)
716 struct fde_accumulator accu
;
719 count
= ob
->s
.b
.count
;
722 if (ob
->s
.b
.from_array
)
724 fde
**p
= ob
->u
.array
;
725 for (count
= 0; *p
; ++p
)
726 count
+= classify_object_over_fdes (ob
, *p
);
729 count
= classify_object_over_fdes (ob
, ob
->u
.single
);
731 /* The count field we have in the main struct object is somewhat
732 limited, but should suffice for virtually all cases. If the
733 counted value doesn't fit, re-write a zero. The worst that
734 happens is that we re-count next time -- admittedly non-trivial
735 in that this implies some 2M fdes, but at least we function. */
736 ob
->s
.b
.count
= count
;
737 if (ob
->s
.b
.count
!= count
)
741 if (!start_fde_sort (&accu
, count
))
744 if (ob
->s
.b
.from_array
)
747 for (p
= ob
->u
.array
; *p
; ++p
)
748 add_fdes (ob
, &accu
, *p
);
751 add_fdes (ob
, &accu
, ob
->u
.single
);
753 end_fde_sort (ob
, &accu
, count
);
755 /* Save the original fde pointer, since this is the key by which the
756 DSO will deregister the object. */
757 accu
.linear
->orig_data
= ob
->u
.single
;
758 ob
->u
.sort
= accu
.linear
;
763 /* A linear search through a set of FDEs for the given PC. This is
764 used when there was insufficient memory to allocate and sort an
768 linear_search_fdes (struct object
*ob
, const fde
*this_fde
, void *pc
)
770 const struct dwarf_cie
*last_cie
= 0;
771 int encoding
= ob
->s
.b
.encoding
;
772 _Unwind_Ptr base
= base_from_object (ob
->s
.b
.encoding
, ob
);
774 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
776 const struct dwarf_cie
*this_cie
;
777 _Unwind_Ptr pc_begin
, pc_range
;
780 if (this_fde
->CIE_delta
== 0)
783 if (ob
->s
.b
.mixed_encoding
)
785 /* Determine the encoding for this FDE. Note mixed encoded
786 objects for later. */
787 this_cie
= get_cie (this_fde
);
788 if (this_cie
!= last_cie
)
791 encoding
= get_cie_encoding (this_cie
);
792 base
= base_from_object (encoding
, ob
);
796 if (encoding
== DW_EH_PE_absptr
)
798 const _Unwind_Ptr
*pc_array
= (const _Unwind_Ptr
*) this_fde
->pc_begin
;
799 pc_begin
= pc_array
[0];
800 pc_range
= pc_array
[1];
807 const unsigned char *p
;
809 p
= read_encoded_value_with_base (encoding
, base
,
810 this_fde
->pc_begin
, &pc_begin
);
811 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
813 /* Take care to ignore link-once functions that were removed.
814 In these cases, the function address will be NULL, but if
815 the encoding is smaller than a pointer a true NULL may not
816 be representable. Assume 0 in the representable bits is NULL. */
817 mask
= size_of_encoded_value (encoding
);
818 if (mask
< sizeof (void *))
819 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
823 if ((pc_begin
& mask
) == 0)
827 if ((_Unwind_Ptr
) pc
- pc_begin
< pc_range
)
834 /* Binary search for an FDE containing the given PC. Here are three
835 implementations of increasing complexity. */
837 static inline const fde
*
838 binary_search_unencoded_fdes (struct object
*ob
, void *pc
)
840 struct fde_vector
*vec
= ob
->u
.sort
;
843 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
845 size_t i
= (lo
+ hi
) / 2;
846 const fde
*const f
= vec
->array
[i
];
849 memcpy (&pc_begin
, (const void * const *) f
->pc_begin
, sizeof (void *));
850 memcpy (&pc_range
, (const uaddr
*) f
->pc_begin
+ 1, sizeof (uaddr
));
854 else if (pc
>= pc_begin
+ pc_range
)
863 static inline const fde
*
864 binary_search_single_encoding_fdes (struct object
*ob
, void *pc
)
866 struct fde_vector
*vec
= ob
->u
.sort
;
867 int encoding
= ob
->s
.b
.encoding
;
868 _Unwind_Ptr base
= base_from_object (encoding
, ob
);
871 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
873 size_t i
= (lo
+ hi
) / 2;
874 const fde
*f
= vec
->array
[i
];
875 _Unwind_Ptr pc_begin
, pc_range
;
876 const unsigned char *p
;
878 p
= read_encoded_value_with_base (encoding
, base
, f
->pc_begin
,
880 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
882 if ((_Unwind_Ptr
) pc
< pc_begin
)
884 else if ((_Unwind_Ptr
) pc
>= pc_begin
+ pc_range
)
893 static inline const fde
*
894 binary_search_mixed_encoding_fdes (struct object
*ob
, void *pc
)
896 struct fde_vector
*vec
= ob
->u
.sort
;
899 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
901 size_t i
= (lo
+ hi
) / 2;
902 const fde
*f
= vec
->array
[i
];
903 _Unwind_Ptr pc_begin
, pc_range
;
904 const unsigned char *p
;
907 encoding
= get_fde_encoding (f
);
908 p
= read_encoded_value_with_base (encoding
,
909 base_from_object (encoding
, ob
),
910 f
->pc_begin
, &pc_begin
);
911 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
913 if ((_Unwind_Ptr
) pc
< pc_begin
)
915 else if ((_Unwind_Ptr
) pc
>= pc_begin
+ pc_range
)
925 search_object (struct object
* ob
, void *pc
)
927 /* If the data hasn't been sorted, try to do this now. We may have
928 more memory available than last time we tried. */
929 if (! ob
->s
.b
.sorted
)
933 /* Despite the above comment, the normal reason to get here is
934 that we've not processed this object before. A quick range
935 check is in order. */
936 if (pc
< ob
->pc_begin
)
942 if (ob
->s
.b
.mixed_encoding
)
943 return binary_search_mixed_encoding_fdes (ob
, pc
);
944 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
945 return binary_search_unencoded_fdes (ob
, pc
);
947 return binary_search_single_encoding_fdes (ob
, pc
);
951 /* Long slow laborious linear search, cos we've no memory. */
952 if (ob
->s
.b
.from_array
)
955 for (p
= ob
->u
.array
; *p
; p
++)
957 const fde
*f
= linear_search_fdes (ob
, *p
, pc
);
964 return linear_search_fdes (ob
, ob
->u
.single
, pc
);
969 _Unwind_Find_FDE (void *pc
, struct dwarf_eh_bases
*bases
)
974 init_object_mutex_once ();
975 __gthread_mutex_lock (&object_mutex
);
977 /* Linear search through the classified objects, to find the one
978 containing the pc. Note that pc_begin is sorted descending, and
979 we expect objects to be non-overlapping. */
980 for (ob
= seen_objects
; ob
; ob
= ob
->next
)
981 if (pc
>= ob
->pc_begin
)
983 f
= search_object (ob
, pc
);
989 /* Classify and search the objects we've not yet processed. */
990 while ((ob
= unseen_objects
))
994 unseen_objects
= ob
->next
;
995 f
= search_object (ob
, pc
);
997 /* Insert the object into the classified list. */
998 for (p
= &seen_objects
; *p
; p
= &(*p
)->next
)
999 if ((*p
)->pc_begin
< ob
->pc_begin
)
1009 __gthread_mutex_unlock (&object_mutex
);
1016 bases
->tbase
= ob
->tbase
;
1017 bases
->dbase
= ob
->dbase
;
1019 encoding
= ob
->s
.b
.encoding
;
1020 if (ob
->s
.b
.mixed_encoding
)
1021 encoding
= get_fde_encoding (f
);
1022 read_encoded_value_with_base (encoding
, base_from_object (encoding
, ob
),
1023 f
->pc_begin
, &func
);
1024 bases
->func
= (void *) func
;