/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2022 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#else
#if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
    && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
#define ATOMIC_FDE_FAST_PATH 1
#endif
#endif
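
/* Note: when ATOMIC_FDE_FAST_PATH is in effect, the mutex-protected
   object lists further below are replaced by a b-tree (see
   unwind-dw2-btree.h) that readers can search without locking; the
   4-byte atomic compare-and-swap requirement above comes from the
   lock-free b-tree operations.  */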
typedef __UINTPTR_TYPE__ uintptr_type;
#ifdef ATOMIC_FDE_FAST_PATH
#include "unwind-dw2-btree.h"

static struct btree registered_frames;
static bool in_shutdown;

static void
release_registered_frames (void) __attribute__ ((destructor));
static void
release_registered_frames (void)
{
  /* Release the b-tree and all frames.  Frame releases that happen
     later are silently ignored.  */
  btree_destroy (&registered_frames);
  in_shutdown = true;
}

static void
get_pc_range (const struct object *ob, uintptr_type *range);
static void
init_object (struct object *ob);

#else
/* Without fast path frame deregistration must always succeed.  */
static const int in_shutdown = 0;
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#define init_object_mutex_once()
#else
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static __gthread_mutex_t object_mutex;

static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* ???  Several targets include this file with stubbing parts of gthr.h
   and expect no locking to be done.  */
#define init_object_mutex_once()
static __gthread_mutex_t object_mutex;
#endif
#endif
#endif
/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
			     void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

#ifdef ATOMIC_FDE_FAST_PATH
  // Initialize eagerly to avoid locking later
  init_object (ob);

  // And register the frame
  uintptr_type range[2];
  get_pc_range (ob, range);
  btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
#else
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
#endif
}
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
				   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

#ifdef ATOMIC_FDE_FAST_PATH
  // Initialize eagerly to avoid locking later
  init_object (ob);

  // And register the frame
  uintptr_type range[2];
  get_pc_range (ob, range);
  btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
#else
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
#endif
}
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */
void *
__deregister_frame_info_bases (const void *begin)
{
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

#ifdef ATOMIC_FDE_FAST_PATH
  // Find the corresponding PC range
  struct object lookupob;
  lookupob.tbase = NULL;
  lookupob.dbase = NULL;
  lookupob.u.single = begin;
  lookupob.s.i = 0;
  lookupob.s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  lookupob.fde_end = NULL;
#endif
  uintptr_type range[2];
  get_pc_range (&lookupob, range);

  // And remove
  ob = btree_remove (&registered_frames, range[0]);
#else
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  struct object **p;
  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
	ob = *p;
	*p = ob->next;
	goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
	if ((*p)->u.sort->orig_data == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    free (ob->u.sort);
	    goto out;
	  }
      }
    else
      {
	if ((*p)->u.single == begin)
	  {
	    ob = *p;
	    *p = ob->next;
	    goto out;
	  }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
#endif

  gcc_assert (in_shutdown || ob);
  return (void *) ob;
}
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}
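
/* Usage sketch (illustrative, not part of this file): a JIT that emits
   CFI for its generated code can publish it through the entry points
   above.  The buffer name below is hypothetical; the memory must hold a
   well-formed .eh_frame sequence terminated by a zero-length record.

     extern void __register_frame (void *);
     extern void __deregister_frame (void *);

     void publish_unwind_info (void *eh_frame_buf)
     {
       __register_frame (eh_frame_buf);
     }

     void retract_unwind_info (void *eh_frame_buf)
     {
       // Must pass the same BEGIN pointer used at registration.
       __deregister_frame (eh_frame_buf);
     }
*/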
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, const struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
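
/* The low nibble of an encoding byte selects the value format and the
   high nibble the base.  For example, 0x1b = DW_EH_PE_pcrel (0x10)
   | DW_EH_PE_sdata4 (0x0b): a signed 4-byte value relative to the
   address it is read from, which is why base_from_object returns 0 for
   pc-relative encodings and the adjustment happens at read time.  */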
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
	return DW_EH_PE_omit;		/* We are not prepared to handle unexpected
					   address sizes or segment selectors.  */
      p += 2;				/* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  if (cie->version == 1)		/* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
	return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
	{
	  /* ??? Avoid dereferencing indirect pointers, since we're
	     faking the base address.  Gotta keep DW_EH_PE_aligned
	     intact, however.  */
	  p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
	}
      /* LSDA encoding.  */
      else if (*aug == 'L')
	p++;
      /* aarch64 b-key pointer authentication.  */
      else if (*aug == 'B')
	p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
	return DW_EH_PE_absptr;
      aug++;
    }
}
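
/* For a typical GCC-produced CIE with augmentation string "zPLR", the
   loop above skips the personality encoding and pointer ('P') and the
   LSDA encoding byte ('L'), then returns the byte following 'R': the
   pointer encoding used by the pc_begin field of every FDE that refers
   to this CIE.  */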
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */
static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
		       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;
  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
				x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
				y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;

  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
	accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}
static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
	   probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
	   probe = chain_end)
	{
	  chain_end = (const fde *const*) erratic->array[probe - linear->array];
	  erratic->array[probe - linear->array] = NULL;
	}
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
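
/* Worked example (a sketch, using the sample sequence from the sort
   comment above): for input
     101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130
   the chain pass evicts 127 and 128 when 105 arrives, 190 when 111
   arrives, and 160 when 126 arrives, leaving
     LINEAR:  101 102 103 105 108 110 111 115 119 125 126 129 130
     ERRATIC: 127 128 190 160
   ERRATIC is then heapsorted and merged back into LINEAR below.  */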
#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */
static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
		int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
	++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
	{
	  SWAP (a[i], a[j]);
	  i = j;
	}
      else
	break;
    }
}
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
		struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}
/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
	   struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
	{
	  i2--;
	  fde2 = v2->array[i2];
	  while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
	    {
	      v1->array[i1+i2] = v1->array[i1-1];
	      i1--;
	    }
	  v1->array[i1+i2] = fde2;
	}
      while (i2 > 0);
      v1->count += v2->count;
    }
}
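
/* The merge walks both arrays from their high ends, shifting V1
   entries upward to make room, so every element is written to its
   final slot at most once and the merge is linear in the total count.
   Continuing the example above, LINEAR = 101 ... 130 merged with the
   heapsorted ERRATIC = 127 128 160 190 yields the fully sorted
   17-entry array in V1.  */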
static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
	 so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
/* Inspect the fde array beginning at this_fde.  This
   function can be used either in query mode (RANGE is
   not null, OB is const), or in update mode (RANGE is
   null, OB is modified).  In query mode the function computes
   the range of PC values and stores it in RANGE.  In
   update mode it updates encoding, mixed_encoding, and pc_begin
   for OB.  Return the number of fdes encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde,
			   uintptr_type *range)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
	 objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
	{
	  last_cie = this_cie;
	  encoding = get_cie_encoding (this_cie);
	  if (encoding == DW_EH_PE_omit)
	    return -1;
	  base = base_from_object (encoding, ob);
	  if (!range)
	    {
	      if (ob->s.b.encoding == DW_EH_PE_omit)
		ob->s.b.encoding = encoding;
	      else if (ob->s.b.encoding != encoding)
		ob->s.b.mixed_encoding = 1;
	    }
	}

      const unsigned char *p;
      p = read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

      /* Take care to ignore link-once functions that were removed.
	 In these cases, the function address will be NULL, but if
	 the encoding is smaller than a pointer a true NULL may not
	 be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
	mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
	mask = -1;

      if ((pc_begin & mask) == 0)
	continue;

      count += 1;
      if (range)
	{
	  _Unwind_Ptr pc_range, pc_end;
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
	  pc_end = pc_begin + pc_range;
	  if ((!range[0]) && (!range[1]))
	    {
	      range[0] = pc_begin;
	      range[1] = pc_end;
	    }
	  else
	    {
	      if (pc_begin < range[0])
		range[0] = pc_begin;
	      if (pc_end > range[1])
		range[1] = pc_end;
	    }
	}
      else
	{
	  if ((void *) pc_begin < ob->pc_begin)
	    ob->pc_begin = (void *) pc_begin;
	}
    }

  return count;
}
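
/* For example, with a 4-byte encoding (DW_EH_PE_udata4 and friends) on
   a 64-bit target, size_of_encoded_value returns 4 and the mask
   computed above is (1 << 32) - 1 = 0xffffffff: an encoded pc_begin
   whose low 32 bits are all zero is skipped as a deleted link-once
   function.  */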
static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  _Unwind_Ptr ptr;
	  memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
	  if (ptr == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr pc_begin, mask;

	  read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
					&pc_begin);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      fde_insert (accu, this_fde);
    }
}
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
	{
	  fde **p = ob->u.array;
	  for (count = 0; *p; ++p)
	    {
	      size_t cur_count = classify_object_over_fdes (ob, *p, NULL);
	      if (cur_count == (size_t) -1)
		goto unhandled_fdes;
	      count += cur_count;
	    }
	}
      else
	{
	  count = classify_object_over_fdes (ob, ob->u.single, NULL);
	  if (count == (size_t) -1)
	    {
	      static const fde terminator;
	    unhandled_fdes:
	      ob->s.i = 0;
	      ob->s.b.encoding = DW_EH_PE_omit;
	      ob->u.single = &terminator;
	      return;
	    }
	}

      /* The count field we have in the main struct object is somewhat
	 limited, but should suffice for virtually all cases.  If the
	 counted value doesn't fit, re-write a zero.  The worst that
	 happens is that we re-count next time -- admittedly non-trivial
	 in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
	ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
	add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
#ifdef ATOMIC_FDE_FAST_PATH
/* Get the PC range for lookup */
static void
get_pc_range (const struct object *ob, uintptr_type *range)
{
  // It is safe to cast to non-const object* here as
  // classify_object_over_fdes does not modify ob in query mode.
  struct object *ncob = (struct object *) (uintptr_type) ob;
  range[0] = range[1] = 0;
  if (ob->s.b.sorted)
    {
      classify_object_over_fdes (ncob, ob->u.sort->orig_data, range);
    }
  else if (ob->s.b.from_array)
    {
      fde **p = ob->u.array;
      for (; *p; ++p)
	classify_object_over_fdes (ncob, *p, range);
    }
  else
    classify_object_over_fdes (ncob, ob->u.single, range);
}
#endif
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
	continue;

      if (ob->s.b.mixed_encoding)
	{
	  /* Determine the encoding for this FDE.  Note mixed encoded
	     objects for later.  */
	  this_cie = get_cie (this_fde);
	  if (this_cie != last_cie)
	    {
	      last_cie = this_cie;
	      encoding = get_cie_encoding (this_cie);
	      base = base_from_object (encoding, ob);
	    }
	}

      if (encoding == DW_EH_PE_absptr)
	{
	  const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
	  pc_begin = pc_array[0];
	  pc_range = pc_array[1];
	  if (pc_begin == 0)
	    continue;
	}
      else
	{
	  _Unwind_Ptr mask;
	  const unsigned char *p;

	  p = read_encoded_value_with_base (encoding, base,
					    this_fde->pc_begin, &pc_begin);
	  read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

	  /* Take care to ignore link-once functions that were removed.
	     In these cases, the function address will be NULL, but if
	     the encoding is smaller than a pointer a true NULL may not
	     be representable.  Assume 0 in the representable bits is NULL.  */
	  mask = size_of_encoded_value (encoding);
	  if (mask < sizeof (void *))
	    mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
	  else
	    mask = -1;

	  if ((pc_begin & mask) == 0)
	    continue;
	}

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
	return this_fde;
    }

  return NULL;
}
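
/* The final range test relies on unsigned wraparound: if PC is below
   pc_begin the subtraction wraps to a huge value and compares greater
   than pc_range, so a single comparison checks both bounds.  */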
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
	hi = i;
      else if (pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
					&pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
					base_from_object (encoding, ob),
					f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
	hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
	lo = i + 1;
      else
	return f;
    }

  return NULL;
}
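
/* All three variants maintain the same invariant: if an FDE covering
   PC exists, its index lies in [lo, hi).  Since the vector is sorted
   by pc_begin and FDE ranges are expected not to overlap, comparing PC
   against pc_begin and pc_begin + pc_range at the midpoint is enough
   to discard half of the remaining candidates each step.  */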
static const fde *
search_object (struct object* ob, void *pc)
{
  /* The fast path initializes objects eagerly to avoid locking.
   * On the slow path we initialize them now.  */
#ifndef ATOMIC_FDE_FAST_PATH
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
	 that we've not processed this object before.  A quick range
	 check is in order.  */
      if (pc < ob->pc_begin)
	return NULL;
    }
#endif

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
	return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
	return binary_search_unencoded_fdes (ob, pc);
      else
	return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
	{
	  fde **p;
	  for (p = ob->u.array; *p ; p++)
	    {
	      const fde *f = linear_search_fdes (ob, *p, pc);
	      if (f)
		return f;
	    }
	  return NULL;
	}
      else
	return linear_search_fdes (ob, ob->u.single, pc);
    }
}
const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

#ifdef ATOMIC_FDE_FAST_PATH
  ob = btree_lookup (&registered_frames, (uintptr_type) pc);
  if (!ob)
    return NULL;

  f = search_object (ob, pc);
#else

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
	f = search_object (ob, pc);
	if (f)
	  goto fini;
	break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
	if ((*p)->pc_begin < ob->pc_begin)
	  break;
      ob->next = *p;
      *p = ob;

      if (f)
	goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);
#endif

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
	encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
				    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}
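
/* Usage sketch (illustrative): the DWARF unwinder in unwind-dw2.c
   resolves unwind info for a return address RA roughly as

     struct dwarf_eh_bases bases;
     const fde *f = _Unwind_Find_FDE ((void *) (ra - 1), &bases);

   subtracting one because a return address points just past its call
   instruction, which may already be the first byte of the next
   function's range.  */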