1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997-2017 Free Software Foundation, Inc.
3 Contributed by Jason Merrill <jason@cygnus.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 #ifndef _Unwind_Find_FDE
29 #include "coretypes.h"
31 #include "libgcc_tm.h"
34 #define NO_BASE_OF_ENCODED_VALUE
35 #include "unwind-pe.h"
36 #include "unwind-dw2-fde.h"
39 #if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
40 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
41 #define ATOMIC_FDE_FAST_PATH 1
/* Global registry state for registered EH-frame objects, plus the lock
   protecting it.  Three mutex flavors are selected by gthread macros:
   static init, init-function via __gthread_once, or a stub with no
   locking at all.  NOTE(review): this extract is missing interior
   source lines; code below is left byte-identical.  */
45 /* The unseen_objects list contains objects that have been registered
46 but not yet categorized in any way. The seen_objects list has had
47 its pc_begin and count fields initialized at minimum, and is sorted
48 by decreasing value of pc_begin. */
49 static struct object
*unseen_objects
;
50 static struct object
*seen_objects
;
51 #ifdef ATOMIC_FDE_FAST_PATH
/* Nonzero once any library has registered FDEs; read lock-free with
   relaxed atomics on the unwind fast path.  */
52 static int any_objects_registered
;
55 #ifdef __GTHREAD_MUTEX_INIT
56 static __gthread_mutex_t object_mutex
= __GTHREAD_MUTEX_INIT
;
/* Static initializer available: once-only init is a no-op.  */
57 #define init_object_mutex_once()
59 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
60 static __gthread_mutex_t object_mutex
;
/* One-shot helper run via __gthread_once to initialize the mutex.  */
63 init_object_mutex (void)
65 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex
);
69 init_object_mutex_once (void)
71 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
72 __gthread_once (&once
, init_object_mutex
);
75 /* ??? Several targets include this file with stubbing parts of gthr.h
76 and expect no locking to be done. */
77 #define init_object_mutex_once()
78 static __gthread_mutex_t object_mutex
;
/* Register the unwind info for one object: record BEGIN (start of the
   .eh_frame data), the text/data base addresses TBASE/DBASE, and push
   OB onto the unseen_objects list under object_mutex.  Called from
   crtbegin.o at load time.  NOTE(review): interior source lines are
   missing from this extract; code left byte-identical.  */
82 /* Called from crtbegin.o to register the unwind info for an object. */
85 __register_frame_info_bases (const void *begin
, struct object
*ob
,
86 void *tbase
, void *dbase
)
88 /* If .eh_frame is empty, don't register at all. */
89 if ((const uword
*) begin
== 0 || *(const uword
*) begin
== 0)
/* pc_begin starts at the sentinel "maximum address"; classification
   later lowers it to the real minimum pc.  */
92 ob
->pc_begin
= (void *)-1;
97 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
98 #ifdef DWARF2_OBJECT_END_PTR_EXTENSION
102 init_object_mutex_once ();
103 __gthread_mutex_lock (&object_mutex
);
/* Link onto the head of the not-yet-classified list.  */
105 ob
->next
= unseen_objects
;
107 #ifdef ATOMIC_FDE_FAST_PATH
108 /* Set flag that at least one library has registered FDEs.
109 Use relaxed MO here, it is up to the app to ensure that the library
110 loading/initialization happens-before using that library in other
111 threads (in particular unwinding with that library's functions
112 appearing in the backtraces). Calling that library's functions
113 without waiting for the library to initialize would be racy. */
114 if (!any_objects_registered
)
115 __atomic_store_n (&any_objects_registered
, 1, __ATOMIC_RELAXED
);
118 __gthread_mutex_unlock (&object_mutex
);
/* Compatibility entry point: register with no text/data base addresses.
   Thin wrapper over __register_frame_info_bases.  */
122 __register_frame_info (const void *begin
, struct object
*ob
)
124 __register_frame_info_bases (begin
, ob
, 0, 0);
/* Register BEGIN, allocating the struct object ourselves.  Skips
   registration entirely when .eh_frame is empty (first word zero).
   NOTE(review): the malloc result is passed straight to
   __register_frame_info in the visible lines; a NULL check, if any,
   is in lines missing from this extract — verify upstream.  */
128 __register_frame (void *begin
)
132 /* If .eh_frame is empty, don't register at all. */
133 if (*(uword
*) begin
== 0)
136 ob
= malloc (sizeof (struct object
));
137 __register_frame_info (begin
, ob
);
/* Like __register_frame_info_bases, but BEGIN points to a table of
   unwind-entry pointers (one per translation unit) rather than one
   contiguous .eh_frame; from_array is set so later passes iterate the
   table.  NOTE(review): interior source lines missing; code left
   byte-identical.  */
140 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
141 for different translation units. Called from the file generated by
145 __register_frame_info_table_bases (void *begin
, struct object
*ob
,
146 void *tbase
, void *dbase
)
148 ob
->pc_begin
= (void *)-1;
/* Mark this object as a table of fde arrays.  */
153 ob
->s
.b
.from_array
= 1;
154 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
156 init_object_mutex_once ();
157 __gthread_mutex_lock (&object_mutex
);
159 ob
->next
= unseen_objects
;
161 #ifdef ATOMIC_FDE_FAST_PATH
162 /* Set flag that at least one library has registered FDEs.
163 Use relaxed MO here, it is up to the app to ensure that the library
164 loading/initialization happens-before using that library in other
165 threads (in particular unwinding with that library's functions
166 appearing in the backtraces). Calling that library's functions
167 without waiting for the library to initialize would be racy. */
168 if (!any_objects_registered
)
169 __atomic_store_n (&any_objects_registered
, 1, __ATOMIC_RELAXED
);
172 __gthread_mutex_unlock (&object_mutex
);
/* Table-registration wrapper with no text/data base addresses.  */
176 __register_frame_info_table (void *begin
, struct object
*ob
)
178 __register_frame_info_table_bases (begin
, ob
, 0, 0);
/* Register a table of unwind entries, allocating the struct object
   ourselves.  NOTE(review): as with __register_frame, any NULL check
   on the malloc result lies in lines missing from this extract.  */
182 __register_frame_table (void *begin
)
184 struct object
*ob
= malloc (sizeof (struct object
));
185 __register_frame_info_table (begin
, ob
);
/* Remove the object registered for BEGIN.  Searches unseen_objects by
   the raw begin pointer, then seen_objects — where sorted objects are
   matched via sort->orig_data (the original fde pointer saved at sort
   time) and unsorted ones via u.single.  Runs under object_mutex.
   NOTE(review): the unlink/return logic between the matches shown here
   is in lines missing from this extract; code left byte-identical.  */
188 /* Called from crtbegin.o to deregister the unwind info for an object. */
189 /* ??? Glibc has for a while now exported __register_frame_info and
190 __deregister_frame_info. If we call __register_frame_info_bases
191 from crtbegin (wherein it is declared weak), and this object does
192 not get pulled from libgcc.a for other reasons, then the
193 invocation of __deregister_frame_info will be resolved from glibc.
194 Since the registration did not happen there, we'll die.
196 Therefore, declare a new deregistration entry point that does the
197 exact same thing, but will resolve to the same library as
198 implements __register_frame_info_bases. */
201 __deregister_frame_info_bases (const void *begin
)
204 struct object
*ob
= 0;
206 /* If .eh_frame is empty, we haven't registered. */
207 if ((const uword
*) begin
== 0 || *(const uword
*) begin
== 0)
210 init_object_mutex_once ();
211 __gthread_mutex_lock (&object_mutex
);
/* First pass: objects never classified, matched by raw pointer.  */
213 for (p
= &unseen_objects
; *p
; p
= &(*p
)->next
)
214 if ((*p
)->u
.single
== begin
)
/* Second pass: classified objects; sorted ones keep the original
   registration pointer in u.sort->orig_data.  */
221 for (p
= &seen_objects
; *p
; p
= &(*p
)->next
)
222 if ((*p
)->s
.b
.sorted
)
224 if ((*p
)->u
.sort
->orig_data
== begin
)
234 if ((*p
)->u
.single
== begin
)
243 __gthread_mutex_unlock (&object_mutex
);
/* Public deregistration wrapper; forwards to the _bases variant and
   returns its result (the deregistered struct object, per the
   registration API contract).  */
249 __deregister_frame_info (const void *begin
)
251 return __deregister_frame_info_bases (begin
);
/* Counterpart of __register_frame: deregister and free the object we
   malloc'd there.  The empty-.eh_frame guard mirrors the registration
   side, where no object was ever allocated.  */
255 __deregister_frame (void *begin
)
257 /* If .eh_frame is empty, we haven't registered. */
258 if (*(uword
*) begin
!= 0)
259 free (__deregister_frame_info (begin
));
/* Map a DW_EH_PE_* pointer-encoding's base-selection bits (mask 0x70)
   to the corresponding base address stored in OB: 0 for absolute or
   aligned/pc-relative styles, ob->tbase for textrel, ob->dbase for
   datarel.  NOTE(review): the funcrel case and the default abort, if
   present, are in lines missing from this extract.  */
263 /* Like base_of_encoded_value, but take the base from a struct object
264 instead of an _Unwind_Context. */
267 base_from_object (unsigned char encoding
, struct object
*ob
)
269 if (encoding
== DW_EH_PE_omit
)
272 switch (encoding
& 0x70)
274 case DW_EH_PE_absptr
:
276 case DW_EH_PE_aligned
:
279 case DW_EH_PE_textrel
:
280 return (_Unwind_Ptr
) ob
->tbase
;
281 case DW_EH_PE_datarel
:
282 return (_Unwind_Ptr
) ob
->dbase
;
/* Parse CIE's augmentation data far enough to extract the FDE
   pointer encoding ('R' augmentation).  Walks: augmentation string,
   optional v4 address/segment sizes, code/data alignment ULEB/SLEB,
   return-address column, then per-character augmentation payloads
   ('z' length, 'P' personality, 'L' LSDA encoding).  Returns
   DW_EH_PE_absptr when no 'R' is found or the augmentation is
   unknown.  NOTE(review): interior source lines (including the 'R'
   handling itself) are missing from this extract; code left
   byte-identical.  */
288 /* Return the FDE pointer encoding from the CIE. */
289 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
292 get_cie_encoding (const struct dwarf_cie
*cie
)
294 const unsigned char *aug
, *p
;
299 aug
= cie
->augmentation
;
300 p
= aug
+ strlen ((const char *)aug
) + 1; /* Skip the augmentation string. */
301 if (__builtin_expect (cie
->version
>= 4, 0))
303 if (p
[0] != sizeof (void *) || p
[1] != 0)
304 return DW_EH_PE_omit
; /* We are not prepared to handle unexpected
305 address sizes or segment selectors. */
306 p
+= 2; /* Skip address size and segment size. */
/* No augmentation at all: everything is a plain absolute pointer.  */
310 return DW_EH_PE_absptr
;
312 p
= read_uleb128 (p
, &utmp
); /* Skip code alignment. */
313 p
= read_sleb128 (p
, &stmp
); /* Skip data alignment. */
314 if (cie
->version
== 1) /* Skip return address column. */
317 p
= read_uleb128 (p
, &utmp
);
319 aug
++; /* Skip 'z' */
320 p
= read_uleb128 (p
, &utmp
); /* Skip augmentation length. */
323 /* This is what we're looking for. */
326 /* Personality encoding and pointer. */
327 else if (*aug
== 'P')
329 /* ??? Avoid dereferencing indirect pointers, since we're
330 faking the base address. Gotta keep DW_EH_PE_aligned
332 p
= read_encoded_value_with_base (*p
& 0x7F, 0, p
+ 1, &dummy
);
335 else if (*aug
== 'L')
337 /* Otherwise end of string, or unknown augmentation. */
339 return DW_EH_PE_absptr
;
/* Convenience: look up the owning CIE of F and return its FDE pointer
   encoding.  */
345 get_fde_encoding (const struct dwarf_fde
*f
)
347 return get_cie_encoding (get_cie (f
));
/* Compare two FDEs whose pc_begin fields are plain absolute pointers.
   memcpy is used to load the possibly-unaligned values without
   violating strict aliasing.  OB is unused in this variant.
   NOTE(review): the comparison/return itself is in lines missing from
   this extract.  */
351 /* Sorting an array of FDEs by address.
352 (Ideally we would have the linker sort the FDEs so we don't have to do
353 it at run time. But the linkers are not yet prepared for this.) */
355 /* Comparison routines. Three variants of increasing complexity. */
358 fde_unencoded_compare (struct object
*ob
__attribute__((unused
)),
359 const fde
*x
, const fde
*y
)
361 _Unwind_Ptr x_ptr
, y_ptr
;
362 memcpy (&x_ptr
, x
->pc_begin
, sizeof (_Unwind_Ptr
));
363 memcpy (&y_ptr
, y
->pc_begin
, sizeof (_Unwind_Ptr
));
/* Compare two FDEs when the whole object shares one pointer encoding:
   decode both pc_begin values against the object's single base, then
   compare.  NOTE(review): the comparison/return is in lines missing
   from this extract.  */
373 fde_single_encoding_compare (struct object
*ob
, const fde
*x
, const fde
*y
)
375 _Unwind_Ptr base
, x_ptr
, y_ptr
;
377 base
= base_from_object (ob
->s
.b
.encoding
, ob
);
378 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, x
->pc_begin
, &x_ptr
);
379 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, y
->pc_begin
, &y_ptr
);
/* Compare two FDEs in a mixed-encoding object: each FDE's encoding is
   fetched from its own CIE before decoding pc_begin.  The most
   expensive of the three comparators.  NOTE(review): comparison/return
   is in lines missing from this extract.  */
389 fde_mixed_encoding_compare (struct object
*ob
, const fde
*x
, const fde
*y
)
391 int x_encoding
, y_encoding
;
392 _Unwind_Ptr x_ptr
, y_ptr
;
394 x_encoding
= get_fde_encoding (x
);
395 read_encoded_value_with_base (x_encoding
, base_from_object (x_encoding
, ob
),
396 x
->pc_begin
, &x_ptr
);
398 y_encoding
= get_fde_encoding (y
);
399 read_encoded_value_with_base (y_encoding
, base_from_object (y_encoding
, ob
),
400 y
->pc_begin
, &y_ptr
);
/* Dispatch type shared by the sort/search machinery below.  */
409 typedef int (*fde_compare_t
) (struct object
*, const fde
*, const fde
*);
/* Sort-accumulator setup: allocate two fde_vector buffers of COUNT
   entries each ("linear" and "erratic").  NOTE(review): the visible
   lines only show the allocations; the success/failure return and the
   cleanup when only one malloc succeeds are in missing lines.  */
412 /* This is a special mix of insertion sort and heap sort, optimized for
413 the data sets that actually occur. They look like
414 101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
415 I.e. a linearly increasing sequence (coming from functions in the text
416 section), with additionally a few unordered elements (coming from functions
417 in gnu_linkonce sections) whose values are higher than the values in the
418 surrounding linear sequence (but not necessarily higher than the values
419 at the end of the linear sequence!).
420 The worst-case total run time is O(N) + O(n log (n)), where N is the
421 total number of FDEs and n is the number of erratic ones. */
423 struct fde_accumulator
425 struct fde_vector
*linear
;
426 struct fde_vector
*erratic
;
430 start_fde_sort (struct fde_accumulator
*accu
, size_t count
)
/* Header plus COUNT fde pointers for each vector.  */
436 size
= sizeof (struct fde_vector
) + sizeof (const fde
*) * count
;
437 if ((accu
->linear
= malloc (size
)))
439 accu
->linear
->count
= 0;
440 if ((accu
->erratic
= malloc (size
)))
441 accu
->erratic
->count
= 0;
/* Append THIS_FDE to the accumulator's linear vector.  Capacity was
   reserved up front by start_fde_sort, so no bounds check here.  */
449 fde_insert (struct fde_accumulator
*accu
, const fde
*this_fde
)
452 accu
->linear
->array
[accu
->linear
->count
++] = this_fde
;
/* Partition LINEAR into its longest ascending subsequence (kept in
   LINEAR) and the remaining out-of-order entries (moved to ERRATIC),
   in O(N).  A back-chain of candidate sequence members is overlaid on
   the ERRATIC array to avoid a separate allocation; &marker serves as
   the chain terminator.  NOTE(review): interior source lines are
   missing (loop bodies are partially elided); code left
   byte-identical.  */
455 /* Split LINEAR into a linear sequence with low values and an erratic
456 sequence with high values, put the linear one (of longest possible
457 length) into LINEAR and the erratic one into ERRATIC. This is O(N).
459 Because the longest linear sequence we are trying to locate within the
460 incoming LINEAR array can be interspersed with (high valued) erratic
461 entries. We construct a chain indicating the sequenced entries.
462 To avoid having to allocate this chain, we overlay it onto the space of
463 the ERRATIC array during construction. A final pass iterates over the
464 chain to determine what should be placed in the ERRATIC array, and
465 what is the linear sequence. This overlay is safe from aliasing. */
468 fde_split (struct object
*ob
, fde_compare_t fde_compare
,
469 struct fde_vector
*linear
, struct fde_vector
*erratic
)
471 static const fde
*marker
;
472 size_t count
= linear
->count
;
473 const fde
*const *chain_end
= &marker
;
476 /* This should optimize out, but it is wise to make sure this assumption
477 is correct. Should these have different sizes, we cannot cast between
478 them and the overlaying onto ERRATIC will not work. */
479 gcc_assert (sizeof (const fde
*) == sizeof (const fde
**));
481 for (i
= 0; i
< count
; i
++)
483 const fde
*const *probe
;
/* Pop chain entries that compare greater than linear->array[i];
   popped entries are marked NULL (i.e. erratic).  */
485 for (probe
= chain_end
;
486 probe
!= &marker
&& fde_compare (ob
, linear
->array
[i
], *probe
) < 0;
489 chain_end
= (const fde
*const*) erratic
->array
[probe
- linear
->array
];
490 erratic
->array
[probe
- linear
->array
] = NULL
;
/* Push the current element onto the chain.  */
492 erratic
->array
[i
] = (const fde
*) chain_end
;
493 chain_end
= &linear
->array
[i
];
496 /* Each entry in LINEAR which is part of the linear sequence we have
497 discovered will correspond to a non-NULL entry in the chain we built in
498 the ERRATIC array. */
499 for (i
= j
= k
= 0; i
< count
; i
++)
500 if (erratic
->array
[i
])
501 linear
->array
[j
++] = linear
->array
[i
];
503 erratic
->array
[k
++] = linear
->array
[i
];
/* Sift-down step for the heapsort below: push the root of a semi-heap
   (heap everywhere except possibly index LO) to its correct position.
   NOTE(review): the loop exit conditions, the SWAP calls, and the
   function's parameter tail (lo/hi) fall in lines missing from this
   extract; code left byte-identical.  */
508 #define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)
510 /* Convert a semi-heap to a heap. A semi-heap is a heap except possibly
511 for the first (root) node; push it down to its rightful place. */
514 frame_downheap (struct object
*ob
, fde_compare_t fde_compare
, const fde
**a
,
/* j = 2*i+1 is the left child of i in the implicit binary heap.  */
519 for (i
= lo
, j
= 2*i
+1;
/* Pick the larger of the two children.  */
523 if (j
+1 < hi
&& fde_compare (ob
, a
[j
], a
[j
+1]) < 0)
526 if (fde_compare (ob
, a
[i
], a
[j
]) < 0)
/* Heapsort the ERRATIC vector in place using FDE_COMPARE, O(n log n).
   Named frame_heapsort to dodge BSD/OS's heapsort in stdlib.h.
   NOTE(review): the swap in the shrink phase falls in missing lines;
   code left byte-identical.  */
536 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
537 use a name that does not conflict. */
540 frame_heapsort (struct object
*ob
, fde_compare_t fde_compare
,
541 struct fde_vector
*erratic
)
543 /* For a description of this algorithm, see:
544 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
546 const fde
** a
= erratic
->array
;
547 /* A portion of the array is called a "heap" if for all i>=0:
548 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
549 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
550 size_t n
= erratic
->count
;
553 /* Expand our heap incrementally from the end of the array, heapifying
554 each resulting semi-heap as we go. After each step, a[m] is the top
556 for (m
= n
/2-1; m
>= 0; --m
)
557 frame_downheap (ob
, fde_compare
, a
, m
, n
);
559 /* Shrink our heap incrementally from the end of the array, first
560 swapping out the largest element a[0] and then re-heapifying the
561 resulting semi-heap. After each step, a[0..m) is a heap. */
562 for (m
= n
-1; m
>= 1; --m
)
565 frame_downheap (ob
, fde_compare
, a
, 0, m
);
/* Merge sorted vector V2 into sorted vector V1 from the back, shifting
   V1's tail right to make room (insertion-merge); afterwards V1 holds
   all entries of both.  NOTE(review): loop setup and index decrements
   are in lines missing from this extract; code left byte-identical.  */
570 /* Merge V1 and V2, both sorted, and put the result into V1. */
572 fde_merge (struct object
*ob
, fde_compare_t fde_compare
,
573 struct fde_vector
*v1
, struct fde_vector
*v2
)
585 fde2
= v2
->array
[i2
];
/* Shift larger v1 entries right to open the insertion slot.  */
586 while (i1
> 0 && fde_compare (ob
, v1
->array
[i1
-1], fde2
) > 0)
588 v1
->array
[i1
+i2
] = v1
->array
[i1
-1];
591 v1
->array
[i1
+i2
] = fde2
;
594 v1
->count
+= v2
->count
;
/* Finish sorting: choose the cheapest applicable comparator (raw,
   single-encoding, or per-FDE mixed), then split into linear+erratic,
   heapsort the erratic part, and merge back — or, if the erratic
   buffer could not be allocated, heapsort the linear vector alone.
   NOTE(review): the branch structure around the fallback path is in
   lines missing from this extract; code left byte-identical.  */
599 end_fde_sort (struct object
*ob
, struct fde_accumulator
*accu
, size_t count
)
601 fde_compare_t fde_compare
;
603 gcc_assert (!accu
->linear
|| accu
->linear
->count
== count
);
/* Pick comparator: mixed-encoding is most general/most expensive,
   raw-pointer compare is cheapest.  */
605 if (ob
->s
.b
.mixed_encoding
)
606 fde_compare
= fde_mixed_encoding_compare
;
607 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
608 fde_compare
= fde_unencoded_compare
;
610 fde_compare
= fde_single_encoding_compare
;
614 fde_split (ob
, fde_compare
, accu
->linear
, accu
->erratic
);
615 gcc_assert (accu
->linear
->count
+ accu
->erratic
->count
== count
);
616 frame_heapsort (ob
, fde_compare
, accu
->erratic
);
617 fde_merge (ob
, fde_compare
, accu
->linear
, accu
->erratic
);
618 free (accu
->erratic
);
622 /* We've not managed to malloc an erratic array,
623 so heap sort in the linear one. */
624 frame_heapsort (ob
, fde_compare
, accu
->linear
);
/* First pass over an fde array: record the object's (possibly mixed)
   pointer encoding, track the minimum pc_begin, and count the FDEs.
   CIE entries (CIE_delta == 0) are skipped; link-once functions whose
   encoded address is 0 under the encoding's representable mask are
   ignored.  NOTE(review): the count increment, the skip `continue`s,
   and the final return are in lines missing from this extract; code
   left byte-identical.  */
629 /* Update encoding, mixed_encoding, and pc_begin for OB for the
630 fde array beginning at THIS_FDE. Return the number of fdes
631 encountered along the way. */
634 classify_object_over_fdes (struct object
*ob
, const fde
*this_fde
)
636 const struct dwarf_cie
*last_cie
= 0;
638 int encoding
= DW_EH_PE_absptr
;
639 _Unwind_Ptr base
= 0;
641 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
643 const struct dwarf_cie
*this_cie
;
644 _Unwind_Ptr mask
, pc_begin
;
/* Skip CIEs: only FDEs (CIE_delta != 0) describe code ranges.  */
647 if (this_fde
->CIE_delta
== 0)
650 /* Determine the encoding for this FDE. Note mixed encoded
651 objects for later. */
652 this_cie
= get_cie (this_fde
);
653 if (this_cie
!= last_cie
)
656 encoding
= get_cie_encoding (this_cie
);
657 if (encoding
== DW_EH_PE_omit
)
659 base
= base_from_object (encoding
, ob
);
660 if (ob
->s
.b
.encoding
== DW_EH_PE_omit
)
661 ob
->s
.b
.encoding
= encoding
;
662 else if (ob
->s
.b
.encoding
!= encoding
)
663 ob
->s
.b
.mixed_encoding
= 1;
666 read_encoded_value_with_base (encoding
, base
, this_fde
->pc_begin
,
669 /* Take care to ignore link-once functions that were removed.
670 In these cases, the function address will be NULL, but if
671 the encoding is smaller than a pointer a true NULL may not
672 be representable. Assume 0 in the representable bits is NULL. */
673 mask
= size_of_encoded_value (encoding
);
674 if (mask
< sizeof (void *))
675 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
679 if ((pc_begin
& mask
) == 0)
/* Track the lowest pc seen so far for this object.  */
683 if ((void *) pc_begin
< ob
->pc_begin
)
684 ob
->pc_begin
= (void *) pc_begin
;
/* Second pass over an fde array: insert every live FDE into the sort
   accumulator, skipping CIEs and removed link-once functions (same
   NULL-under-mask test as classify_object_over_fdes).  For
   mixed-encoding objects the encoding/base are refreshed per CIE.
   NOTE(review): skip `continue`s and some branch structure are in
   lines missing from this extract; code left byte-identical.  */
691 add_fdes (struct object
*ob
, struct fde_accumulator
*accu
, const fde
*this_fde
)
693 const struct dwarf_cie
*last_cie
= 0;
694 int encoding
= ob
->s
.b
.encoding
;
695 _Unwind_Ptr base
= base_from_object (ob
->s
.b
.encoding
, ob
);
697 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
699 const struct dwarf_cie
*this_cie
;
/* Skip CIEs.  */
702 if (this_fde
->CIE_delta
== 0)
705 if (ob
->s
.b
.mixed_encoding
)
707 /* Determine the encoding for this FDE. Note mixed encoded
708 objects for later. */
709 this_cie
= get_cie (this_fde
);
710 if (this_cie
!= last_cie
)
713 encoding
= get_cie_encoding (this_cie
);
714 base
= base_from_object (encoding
, ob
);
718 if (encoding
== DW_EH_PE_absptr
)
/* Raw pointer case: memcpy avoids unaligned/aliasing issues.  */
721 memcpy (&ptr
, this_fde
->pc_begin
, sizeof (_Unwind_Ptr
));
727 _Unwind_Ptr pc_begin
, mask
;
729 read_encoded_value_with_base (encoding
, base
, this_fde
->pc_begin
,
732 /* Take care to ignore link-once functions that were removed.
733 In these cases, the function address will be NULL, but if
734 the encoding is smaller than a pointer a true NULL may not
735 be representable. Assume 0 in the representable bits is NULL. */
736 mask
= size_of_encoded_value (encoding
);
737 if (mask
< sizeof (void *))
738 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
742 if ((pc_begin
& mask
) == 0)
746 fde_insert (accu
, this_fde
);
/* Classify and sort all FDEs of OB: count entries (per-array for
   table objects), handle unexpected-version bailout by substituting a
   static empty terminator fde, clamp the bitfield count, then allocate
   accumulators, add all FDEs, sort, and publish the sorted vector in
   ob->u.sort (keyed by the saved orig_data for later deregistration).
   May be re-run if a previous sort allocation failed.  NOTE(review):
   interior lines — including where s.b.sorted is set — are missing
   from this extract; code left byte-identical.  */
750 /* Set up a sorted array of pointers to FDEs for a loaded object. We
751 count up the entries before allocating the array because it's likely to
752 be faster. We can be called multiple times, should we have failed to
753 allocate a sorted fde array on a previous occasion. */
756 init_object (struct object
* ob
)
758 struct fde_accumulator accu
;
761 count
= ob
->s
.b
.count
;
764 if (ob
->s
.b
.from_array
)
766 fde
**p
= ob
->u
.array
;
767 for (count
= 0; *p
; ++p
)
769 size_t cur_count
= classify_object_over_fdes (ob
, *p
);
770 if (cur_count
== (size_t) -1)
777 count
= classify_object_over_fdes (ob
, ob
->u
.single
);
778 if (count
== (size_t) -1)
/* Classification bailed (unhandled CIE version): neutralize the
   object with an empty terminator so searches find nothing.  */
780 static const fde terminator
;
783 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
784 ob
->u
.single
= &terminator
;
789 /* The count field we have in the main struct object is somewhat
790 limited, but should suffice for virtually all cases. If the
791 counted value doesn't fit, re-write a zero. The worst that
792 happens is that we re-count next time -- admittedly non-trivial
793 in that this implies some 2M fdes, but at least we function. */
794 ob
->s
.b
.count
= count
;
795 if (ob
->s
.b
.count
!= count
)
799 if (!start_fde_sort (&accu
, count
))
802 if (ob
->s
.b
.from_array
)
805 for (p
= ob
->u
.array
; *p
; ++p
)
806 add_fdes (ob
, &accu
, *p
);
809 add_fdes (ob
, &accu
, ob
->u
.single
);
811 end_fde_sort (ob
, &accu
, count
);
813 /* Save the original fde pointer, since this is the key by which the
814 DSO will deregister the object. */
815 accu
.linear
->orig_data
= ob
->u
.single
;
816 ob
->u
.sort
= accu
.linear
;
/* Fallback when no sorted array could be allocated: walk the raw fde
   array looking for the FDE whose [pc_begin, pc_begin+pc_range) range
   covers PC.  Same CIE-skip, per-CIE re-encoding, and link-once NULL
   mask logic as the classify/add passes.  NOTE(review): the `mask`
   declaration, skip `continue`s, and the return statements are in
   lines missing from this extract; code left byte-identical.  */
821 /* A linear search through a set of FDEs for the given PC. This is
822 used when there was insufficient memory to allocate and sort an
826 linear_search_fdes (struct object
*ob
, const fde
*this_fde
, void *pc
)
828 const struct dwarf_cie
*last_cie
= 0;
829 int encoding
= ob
->s
.b
.encoding
;
830 _Unwind_Ptr base
= base_from_object (ob
->s
.b
.encoding
, ob
);
832 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
834 const struct dwarf_cie
*this_cie
;
835 _Unwind_Ptr pc_begin
, pc_range
;
/* Skip CIEs.  */
838 if (this_fde
->CIE_delta
== 0)
841 if (ob
->s
.b
.mixed_encoding
)
843 /* Determine the encoding for this FDE. Note mixed encoded
844 objects for later. */
845 this_cie
= get_cie (this_fde
);
846 if (this_cie
!= last_cie
)
849 encoding
= get_cie_encoding (this_cie
);
850 base
= base_from_object (encoding
, ob
);
854 if (encoding
== DW_EH_PE_absptr
)
856 const _Unwind_Ptr
*pc_array
= (const _Unwind_Ptr
*) this_fde
->pc_begin
;
857 pc_begin
= pc_array
[0];
858 pc_range
= pc_array
[1];
865 const unsigned char *p
;
867 p
= read_encoded_value_with_base (encoding
, base
,
868 this_fde
->pc_begin
, &pc_begin
);
/* Range is encoded without a base (low nibble of encoding only).  */
869 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
871 /* Take care to ignore link-once functions that were removed.
872 In these cases, the function address will be NULL, but if
873 the encoding is smaller than a pointer a true NULL may not
874 be representable. Assume 0 in the representable bits is NULL. */
875 mask
= size_of_encoded_value (encoding
);
876 if (mask
< sizeof (void *))
877 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
881 if ((pc_begin
& mask
) == 0)
/* Unsigned subtraction doubles as the pc >= pc_begin check.  */
885 if ((_Unwind_Ptr
) pc
- pc_begin
< pc_range
)
/* Binary search the sorted vector when pc_begin values are raw
   absolute pointers; pc_begin/pc_range are memcpy'd out to avoid
   unaligned loads.  NOTE(review): the narrowing of lo/hi and the
   success return are in lines missing from this extract; code left
   byte-identical.  */
892 /* Binary search for an FDE containing the given PC. Here are three
893 implementations of increasing complexity. */
895 static inline const fde
*
896 binary_search_unencoded_fdes (struct object
*ob
, void *pc
)
898 struct fde_vector
*vec
= ob
->u
.sort
;
901 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
903 size_t i
= (lo
+ hi
) / 2;
904 const fde
*const f
= vec
->array
[i
];
907 memcpy (&pc_begin
, (const void * const *) f
->pc_begin
, sizeof (void *));
908 memcpy (&pc_range
, (const uaddr
*) f
->pc_begin
+ 1, sizeof (uaddr
));
912 else if (pc
>= pc_begin
+ pc_range
)
/* Binary search for the single-encoding case: decode each probed
   FDE's pc_begin/pc_range against the object's one encoding/base.
   NOTE(review): lo/hi narrowing and the success return are in lines
   missing from this extract; code left byte-identical.  */
921 static inline const fde
*
922 binary_search_single_encoding_fdes (struct object
*ob
, void *pc
)
924 struct fde_vector
*vec
= ob
->u
.sort
;
925 int encoding
= ob
->s
.b
.encoding
;
926 _Unwind_Ptr base
= base_from_object (encoding
, ob
);
929 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
931 size_t i
= (lo
+ hi
) / 2;
932 const fde
*f
= vec
->array
[i
];
933 _Unwind_Ptr pc_begin
, pc_range
;
934 const unsigned char *p
;
936 p
= read_encoded_value_with_base (encoding
, base
, f
->pc_begin
,
938 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
940 if ((_Unwind_Ptr
) pc
< pc_begin
)
942 else if ((_Unwind_Ptr
) pc
>= pc_begin
+ pc_range
)
/* Binary search for mixed-encoding objects: fetch each probed FDE's
   encoding from its own CIE before decoding.  Slowest variant.
   NOTE(review): lo/hi narrowing and the success return are in lines
   missing from this extract; code left byte-identical.  */
951 static inline const fde
*
952 binary_search_mixed_encoding_fdes (struct object
*ob
, void *pc
)
954 struct fde_vector
*vec
= ob
->u
.sort
;
957 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
959 size_t i
= (lo
+ hi
) / 2;
960 const fde
*f
= vec
->array
[i
];
961 _Unwind_Ptr pc_begin
, pc_range
;
962 const unsigned char *p
;
965 encoding
= get_fde_encoding (f
);
966 p
= read_encoded_value_with_base (encoding
,
967 base_from_object (encoding
, ob
),
968 f
->pc_begin
, &pc_begin
);
969 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
971 if ((_Unwind_Ptr
) pc
< pc_begin
)
973 else if ((_Unwind_Ptr
) pc
>= pc_begin
+ pc_range
)
/* Find the FDE in OB that covers PC.  Sorts the object lazily (may be
   retried if a previous sort ran out of memory); when sorted, picks
   the binary-search variant matching the object's encoding; otherwise
   falls back to linear search over the array or single fde list.
   NOTE(review): the init_object call and the found-FDE return inside
   the array loop are in lines missing from this extract; code left
   byte-identical.  */
983 search_object (struct object
* ob
, void *pc
)
985 /* If the data hasn't been sorted, try to do this now. We may have
986 more memory available than last time we tried. */
987 if (! ob
->s
.b
.sorted
)
991 /* Despite the above comment, the normal reason to get here is
992 that we've not processed this object before. A quick range
993 check is in order. */
994 if (pc
< ob
->pc_begin
)
/* Sorted: dispatch on encoding complexity.  */
1000 if (ob
->s
.b
.mixed_encoding
)
1001 return binary_search_mixed_encoding_fdes (ob
, pc
);
1002 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
1003 return binary_search_unencoded_fdes (ob
, pc
);
1005 return binary_search_single_encoding_fdes (ob
, pc
);
1009 /* Long slow laborious linear search, cos we've no memory. */
1010 if (ob
->s
.b
.from_array
)
1013 for (p
= ob
->u
.array
; *p
; p
++)
1015 const fde
*f
= linear_search_fdes (ob
, *p
, pc
);
1022 return linear_search_fdes (ob
, ob
->u
.single
, pc
);
1027 _Unwind_Find_FDE (void *pc
, struct dwarf_eh_bases
*bases
)
1030 const fde
*f
= NULL
;
1032 #ifdef ATOMIC_FDE_FAST_PATH
1033 /* For targets where unwind info is usually not registered through these
1034 APIs anymore, avoid taking a global lock.
1035 Use relaxed MO here, it is up to the app to ensure that the library
1036 loading/initialization happens-before using that library in other
1037 threads (in particular unwinding with that library's functions
1038 appearing in the backtraces). Calling that library's functions
1039 without waiting for the library to initialize would be racy. */
1040 if (__builtin_expect (!__atomic_load_n (&any_objects_registered
,
1041 __ATOMIC_RELAXED
), 1))
1045 init_object_mutex_once ();
1046 __gthread_mutex_lock (&object_mutex
);
1048 /* Linear search through the classified objects, to find the one
1049 containing the pc. Note that pc_begin is sorted descending, and
1050 we expect objects to be non-overlapping. */
1051 for (ob
= seen_objects
; ob
; ob
= ob
->next
)
1052 if (pc
>= ob
->pc_begin
)
1054 f
= search_object (ob
, pc
);
1060 /* Classify and search the objects we've not yet processed. */
1061 while ((ob
= unseen_objects
))
1065 unseen_objects
= ob
->next
;
1066 f
= search_object (ob
, pc
);
1068 /* Insert the object into the classified list. */
1069 for (p
= &seen_objects
; *p
; p
= &(*p
)->next
)
1070 if ((*p
)->pc_begin
< ob
->pc_begin
)
1080 __gthread_mutex_unlock (&object_mutex
);
1087 bases
->tbase
= ob
->tbase
;
1088 bases
->dbase
= ob
->dbase
;
1090 encoding
= ob
->s
.b
.encoding
;
1091 if (ob
->s
.b
.mixed_encoding
)
1092 encoding
= get_fde_encoding (f
);
1093 read_encoded_value_with_base (encoding
, base_from_object (encoding
, ob
),
1094 f
->pc_begin
, &func
);
1095 bases
->func
= (void *) func
;