1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997-2024 Free Software Foundation, Inc.
3 Contributed by Jason Merrill <jason@cygnus.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 #ifndef _Unwind_Find_FDE
29 #include "coretypes.h"
31 #include "libgcc_tm.h"
34 #define NO_BASE_OF_ENCODED_VALUE
35 #include "unwind-pe.h"
36 #include "unwind-dw2-fde.h"
39 #if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
40 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
41 #define ATOMIC_FDE_FAST_PATH 1
45 typedef __UINTPTR_TYPE__ uintptr_type
;
47 #ifdef ATOMIC_FDE_FAST_PATH
48 #include "unwind-dw2-btree.h"
50 static struct btree registered_frames
;
51 static struct btree registered_objects
;
52 static bool in_shutdown
;
55 release_registered_frames (void) __attribute__ ((destructor
));
57 release_registered_frames (void)
59 /* Release the b-tree and all frames. Frame releases that happen later are
61 btree_destroy (®istered_frames
);
62 btree_destroy (®istered_objects
);
67 get_pc_range (const struct object
*ob
, uintptr_type
*range
);
70 /* Without fast path frame deregistration must always succeed. */
71 static const int in_shutdown
= 0;
73 /* The unseen_objects list contains objects that have been registered
74 but not yet categorized in any way. The seen_objects list has had
75 its pc_begin and count fields initialized at minimum, and is sorted
76 by decreasing value of pc_begin. */
77 static struct object
*unseen_objects
;
78 static struct object
*seen_objects
;
81 #ifdef __GTHREAD_MUTEX_INIT
82 static __gthread_mutex_t object_mutex
= __GTHREAD_MUTEX_INIT
;
83 #define init_object_mutex_once()
85 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
86 static __gthread_mutex_t object_mutex
;
89 init_object_mutex (void)
91 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex
);
95 init_object_mutex_once (void)
97 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
98 __gthread_once (&once
, init_object_mutex
);
101 /* ??? Several targets include this file with stubbing parts of gthr.h
102 and expect no locking to be done. */
103 #define init_object_mutex_once()
104 static __gthread_mutex_t object_mutex
;
108 #ifdef ATOMIC_FDE_FAST_PATH
109 // Register the pc range for a given object in the lookup structure.
111 register_pc_range_for_object (uintptr_type begin
, struct object
*ob
)
113 // Register the object itself to know the base pointer on deregistration.
114 btree_insert (®istered_objects
, begin
, 1, ob
);
116 // Register the frame in the b-tree
117 uintptr_type range
[2];
118 get_pc_range (ob
, range
);
119 btree_insert (®istered_frames
, range
[0], range
[1] - range
[0], ob
);
123 /* Called from crtbegin.o to register the unwind info for an object. */
126 __register_frame_info_bases (const void *begin
, struct object
*ob
,
127 void *tbase
, void *dbase
)
129 /* If .eh_frame is empty, don't register at all. */
130 if ((const uword
*) begin
== 0 || *(const uword
*) begin
== 0)
133 ob
->pc_begin
= (void *)-1;
136 ob
->u
.single
= begin
;
138 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
139 #ifdef DWARF2_OBJECT_END_PTR_EXTENSION
143 #ifdef ATOMIC_FDE_FAST_PATH
144 register_pc_range_for_object ((uintptr_type
) begin
, ob
);
146 init_object_mutex_once ();
147 __gthread_mutex_lock (&object_mutex
);
149 ob
->next
= unseen_objects
;
152 __gthread_mutex_unlock (&object_mutex
);
/* Register unwind info with no text/data base addresses.  */
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
163 __register_frame (void *begin
)
167 /* If .eh_frame is empty, don't register at all. */
168 if (*(uword
*) begin
== 0)
171 ob
= malloc (sizeof (struct object
));
172 __register_frame_info (begin
, ob
);
175 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
176 for different translation units. Called from the file generated by
180 __register_frame_info_table_bases (void *begin
, struct object
*ob
,
181 void *tbase
, void *dbase
)
183 ob
->pc_begin
= (void *)-1;
188 ob
->s
.b
.from_array
= 1;
189 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
191 #ifdef ATOMIC_FDE_FAST_PATH
192 register_pc_range_for_object ((uintptr_type
) begin
, ob
);
194 init_object_mutex_once ();
195 __gthread_mutex_lock (&object_mutex
);
197 ob
->next
= unseen_objects
;
200 __gthread_mutex_unlock (&object_mutex
);
/* Table-of-tables registration with no text/data base addresses.  */
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
211 __register_frame_table (void *begin
)
213 struct object
*ob
= malloc (sizeof (struct object
));
214 __register_frame_info_table (begin
, ob
);
217 /* Called from crtbegin.o to deregister the unwind info for an object. */
218 /* ??? Glibc has for a while now exported __register_frame_info and
219 __deregister_frame_info. If we call __register_frame_info_bases
220 from crtbegin (wherein it is declared weak), and this object does
221 not get pulled from libgcc.a for other reasons, then the
222 invocation of __deregister_frame_info will be resolved from glibc.
223 Since the registration did not happen there, we'll die.
225 Therefore, declare a new deregistration entry point that does the
226 exact same thing, but will resolve to the same library as
227 implements __register_frame_info_bases. */
230 __deregister_frame_info_bases (const void *begin
)
232 struct object
*ob
= 0;
234 /* If .eh_frame is empty, we haven't registered. */
235 if ((const uword
*) begin
== 0 || *(const uword
*) begin
== 0)
238 #ifdef ATOMIC_FDE_FAST_PATH
239 // Find the originally registered object to get the base pointer.
240 ob
= btree_remove (®istered_objects
, (uintptr_type
) begin
);
242 // Remove the corresponding PC range.
245 uintptr_type range
[2];
246 get_pc_range (ob
, range
);
247 if (range
[0] != range
[1])
248 btree_remove (®istered_frames
, range
[0]);
251 // Deallocate the sort array if any.
252 if (ob
&& ob
->s
.b
.sorted
)
257 init_object_mutex_once ();
258 __gthread_mutex_lock (&object_mutex
);
261 for (p
= &unseen_objects
; *p
; p
= &(*p
)->next
)
262 if ((*p
)->u
.single
== begin
)
269 for (p
= &seen_objects
; *p
; p
= &(*p
)->next
)
270 if ((*p
)->s
.b
.sorted
)
272 if ((*p
)->u
.sort
->orig_data
== begin
)
282 if ((*p
)->u
.single
== begin
)
291 __gthread_mutex_unlock (&object_mutex
);
294 // If we didn't find anything in the lookup data structures then they
295 // were either already destroyed or we tried to remove an empty range.
296 gcc_assert (in_shutdown
|| ob
);
/* Compatibility entry point; see the comment above
   __deregister_frame_info_bases.  */
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
307 __deregister_frame (void *begin
)
309 /* If .eh_frame is empty, we haven't registered. */
310 if (*(uword
*) begin
!= 0)
311 free (__deregister_frame_info (begin
));
315 /* Like base_of_encoded_value, but take the base from a struct object
316 instead of an _Unwind_Context. */
319 base_from_object (unsigned char encoding
, const struct object
*ob
)
321 if (encoding
== DW_EH_PE_omit
)
324 switch (encoding
& 0x70)
326 case DW_EH_PE_absptr
:
328 case DW_EH_PE_aligned
:
331 case DW_EH_PE_textrel
:
332 return (_Unwind_Ptr
) ob
->tbase
;
333 case DW_EH_PE_datarel
:
334 return (_Unwind_Ptr
) ob
->dbase
;
340 /* Return the FDE pointer encoding from the CIE. */
341 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
344 get_cie_encoding (const struct dwarf_cie
*cie
)
346 const unsigned char *aug
, *p
;
351 aug
= cie
->augmentation
;
352 p
= aug
+ strlen ((const char *)aug
) + 1; /* Skip the augmentation string. */
353 if (__builtin_expect (cie
->version
>= 4, 0))
355 if (p
[0] != sizeof (void *) || p
[1] != 0)
356 return DW_EH_PE_omit
; /* We are not prepared to handle unexpected
357 address sizes or segment selectors. */
358 p
+= 2; /* Skip address size and segment size. */
362 return DW_EH_PE_absptr
;
364 p
= read_uleb128 (p
, &utmp
); /* Skip code alignment. */
365 p
= read_sleb128 (p
, &stmp
); /* Skip data alignment. */
366 if (cie
->version
== 1) /* Skip return address column. */
369 p
= read_uleb128 (p
, &utmp
);
371 aug
++; /* Skip 'z' */
372 p
= read_uleb128 (p
, &utmp
); /* Skip augmentation length. */
375 /* This is what we're looking for. */
378 /* Personality encoding and pointer. */
379 else if (*aug
== 'P')
381 /* ??? Avoid dereferencing indirect pointers, since we're
382 faking the base address. Gotta keep DW_EH_PE_aligned
384 p
= read_encoded_value_with_base (*p
& 0x7F, 0, p
+ 1, &dummy
);
387 else if (*aug
== 'L')
389 /* aarch64 b-key pointer authentication. */
390 else if (*aug
== 'B')
392 /* Otherwise end of string, or unknown augmentation. */
394 return DW_EH_PE_absptr
;
/* Return the pointer encoding of FDE F, taken from its CIE.  */
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
406 /* Sorting an array of FDEs by address.
407 (Ideally we would have the linker sort the FDEs so we don't have to do
408 it at run time. But the linkers are not yet prepared for this.) */
410 /* Comparison routines. Three variants of increasing complexity. */
413 fde_unencoded_compare (struct object
*ob
__attribute__((unused
)),
414 const fde
*x
, const fde
*y
)
416 _Unwind_Ptr x_ptr
, y_ptr
;
417 memcpy (&x_ptr
, x
->pc_begin
, sizeof (_Unwind_Ptr
));
418 memcpy (&y_ptr
, y
->pc_begin
, sizeof (_Unwind_Ptr
));
428 fde_single_encoding_compare (struct object
*ob
, const fde
*x
, const fde
*y
)
430 _Unwind_Ptr base
, x_ptr
, y_ptr
;
432 base
= base_from_object (ob
->s
.b
.encoding
, ob
);
433 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, x
->pc_begin
, &x_ptr
);
434 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, y
->pc_begin
, &y_ptr
);
444 fde_mixed_encoding_compare (struct object
*ob
, const fde
*x
, const fde
*y
)
446 int x_encoding
, y_encoding
;
447 _Unwind_Ptr x_ptr
, y_ptr
;
449 x_encoding
= get_fde_encoding (x
);
450 read_encoded_value_with_base (x_encoding
, base_from_object (x_encoding
, ob
),
451 x
->pc_begin
, &x_ptr
);
453 y_encoding
= get_fde_encoding (y
);
454 read_encoded_value_with_base (y_encoding
, base_from_object (y_encoding
, ob
),
455 y
->pc_begin
, &y_ptr
);
464 typedef int (*fde_compare_t
) (struct object
*, const fde
*, const fde
*);
466 // The extractor functions compute the pointer values for a block of
467 // fdes. The block processing hides the call overhead.
470 fde_unencoded_extract (struct object
*ob
__attribute__ ((unused
)),
471 _Unwind_Ptr
*target
, const fde
**x
, int count
)
473 for (int index
= 0; index
< count
; ++index
)
474 memcpy (target
+ index
, x
[index
]->pc_begin
, sizeof (_Unwind_Ptr
));
478 fde_single_encoding_extract (struct object
*ob
, _Unwind_Ptr
*target
,
479 const fde
**x
, int count
)
483 base
= base_from_object (ob
->s
.b
.encoding
, ob
);
484 for (int index
= 0; index
< count
; ++index
)
485 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, x
[index
]->pc_begin
,
490 fde_mixed_encoding_extract (struct object
*ob
, _Unwind_Ptr
*target
,
491 const fde
**x
, int count
)
493 for (int index
= 0; index
< count
; ++index
)
495 int encoding
= get_fde_encoding (x
[index
]);
496 read_encoded_value_with_base (encoding
, base_from_object (encoding
, ob
),
497 x
[index
]->pc_begin
, target
+ index
);
501 typedef void (*fde_extractor_t
) (struct object
*, _Unwind_Ptr
*, const fde
**,
// Data is sorted using radix sort if possible, using a temporary
// auxiliary data structure of the same size as the input. When running
// out of memory do in-place heap sort.

struct fde_accumulator
{
  struct fde_vector *linear;    /* the array actually handed to callers */
  struct fde_vector *aux;       /* scratch space for radix sort; may be NULL */
};
515 start_fde_sort (struct fde_accumulator
*accu
, size_t count
)
521 size
= sizeof (struct fde_vector
) + sizeof (const fde
*) * count
;
522 if ((accu
->linear
= malloc (size
)))
524 accu
->linear
->count
= 0;
525 if ((accu
->aux
= malloc (size
)))
526 accu
->aux
->count
= 0;
534 fde_insert (struct fde_accumulator
*accu
, const fde
*this_fde
)
537 accu
->linear
->array
[accu
->linear
->count
++] = this_fde
;
540 #define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)
542 /* Convert a semi-heap to a heap. A semi-heap is a heap except possibly
543 for the first (root) node; push it down to its rightful place. */
546 frame_downheap (struct object
*ob
, fde_compare_t fde_compare
, const fde
**a
,
551 for (i
= lo
, j
= 2*i
+1;
555 if (j
+1 < hi
&& fde_compare (ob
, a
[j
], a
[j
+1]) < 0)
558 if (fde_compare (ob
, a
[i
], a
[j
]) < 0)
568 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
569 use a name that does not conflict. */
572 frame_heapsort (struct object
*ob
, fde_compare_t fde_compare
,
573 struct fde_vector
*erratic
)
575 /* For a description of this algorithm, see:
576 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
578 const fde
** a
= erratic
->array
;
579 /* A portion of the array is called a "heap" if for all i>=0:
580 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
581 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
582 size_t n
= erratic
->count
;
585 /* Expand our heap incrementally from the end of the array, heapifying
586 each resulting semi-heap as we go. After each step, a[m] is the top
588 for (m
= n
/2-1; m
>= 0; --m
)
589 frame_downheap (ob
, fde_compare
, a
, m
, n
);
591 /* Shrink our heap incrementally from the end of the array, first
592 swapping out the largest element a[0] and then re-heapifying the
593 resulting semi-heap. After each step, a[0..m) is a heap. */
594 for (m
= n
-1; m
>= 1; --m
)
597 frame_downheap (ob
, fde_compare
, a
, 0, m
);
602 // Radix sort data in V1 using V2 as aux memory. Runtime O(n).
604 fde_radixsort (struct object
*ob
, fde_extractor_t fde_extractor
,
605 struct fde_vector
*v1
, struct fde_vector
*v2
)
608 #define FANOUT (1 << FANOUTBITS)
609 #define BLOCKSIZE 128
610 const unsigned rounds
611 = (__CHAR_BIT__
* sizeof (_Unwind_Ptr
) + FANOUTBITS
- 1) / FANOUTBITS
;
612 const fde
**a1
= v1
->array
, **a2
= v2
->array
;
613 _Unwind_Ptr ptrs
[BLOCKSIZE
+ 1];
614 unsigned n
= v1
->count
;
615 for (unsigned round
= 0; round
!= rounds
; ++round
)
617 unsigned counts
[FANOUT
] = {0};
618 unsigned violations
= 0;
620 // Count the number of elements per bucket and check if we are already
622 _Unwind_Ptr last
= 0;
623 for (unsigned i
= 0; i
< n
;)
625 unsigned chunk
= ((n
- i
) <= BLOCKSIZE
) ? (n
- i
) : BLOCKSIZE
;
626 fde_extractor (ob
, ptrs
+ 1, a1
+ i
, chunk
);
628 for (unsigned j
= 0; j
< chunk
; ++j
)
630 unsigned b
= (ptrs
[j
+ 1] >> (round
* FANOUTBITS
)) & (FANOUT
- 1);
632 // Use summation instead of an if to eliminate branches.
633 violations
+= ptrs
[j
+ 1] < ptrs
[j
];
639 // Stop if we are already sorted.
645 // Compute the prefix sum.
647 for (unsigned i
= 0; i
!= FANOUT
; ++i
)
654 // Place all elements.
655 for (unsigned i
= 0; i
< n
;)
657 unsigned chunk
= ((n
- i
) <= BLOCKSIZE
) ? (n
- i
) : BLOCKSIZE
;
658 fde_extractor (ob
, ptrs
, a1
+ i
, chunk
);
659 for (unsigned j
= 0; j
< chunk
; ++j
)
661 unsigned b
= (ptrs
[j
] >> (round
* FANOUTBITS
)) & (FANOUT
- 1);
662 a2
[counts
[b
]++] = a1
[i
+ j
];
668 const fde
**tmp
= a1
;
676 // The data is in a1 now, move in place if needed.
678 memcpy (v1
->array
, a1
, sizeof (const fde
*) * n
);
682 end_fde_sort (struct object
*ob
, struct fde_accumulator
*accu
, size_t count
)
684 gcc_assert (!accu
->linear
|| accu
->linear
->count
== count
);
688 fde_extractor_t fde_extractor
;
689 if (ob
->s
.b
.mixed_encoding
)
690 fde_extractor
= fde_mixed_encoding_extract
;
691 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
692 fde_extractor
= fde_unencoded_extract
;
694 fde_extractor
= fde_single_encoding_extract
;
696 fde_radixsort (ob
, fde_extractor
, accu
->linear
, accu
->aux
);
701 fde_compare_t fde_compare
;
702 if (ob
->s
.b
.mixed_encoding
)
703 fde_compare
= fde_mixed_encoding_compare
;
704 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
705 fde_compare
= fde_unencoded_compare
;
707 fde_compare
= fde_single_encoding_compare
;
709 /* We've not managed to malloc an aux array,
710 so heap sort in the linear one. */
711 frame_heapsort (ob
, fde_compare
, accu
->linear
);
715 /* Inspect the fde array beginning at this_fde. This
716 function can be used either in query mode (RANGE is
717 not null, OB is const), or in update mode (RANGE is
718 null, OB is modified). In query mode the function computes
719 the range of PC values and stores it in RANGE. In
720 update mode it updates encoding, mixed_encoding, and pc_begin
721 for OB. Return the number of fdes encountered along the way. */
724 classify_object_over_fdes (struct object
*ob
, const fde
*this_fde
,
727 const struct dwarf_cie
*last_cie
= 0;
729 int encoding
= DW_EH_PE_absptr
;
730 _Unwind_Ptr base
= 0;
732 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
734 const struct dwarf_cie
*this_cie
;
735 _Unwind_Ptr mask
, pc_begin
;
738 if (this_fde
->CIE_delta
== 0)
741 /* Determine the encoding for this FDE. Note mixed encoded
742 objects for later. */
743 this_cie
= get_cie (this_fde
);
744 if (this_cie
!= last_cie
)
747 encoding
= get_cie_encoding (this_cie
);
748 if (encoding
== DW_EH_PE_omit
)
750 base
= base_from_object (encoding
, ob
);
753 if (ob
->s
.b
.encoding
== DW_EH_PE_omit
)
754 ob
->s
.b
.encoding
= encoding
;
755 else if (ob
->s
.b
.encoding
!= encoding
)
756 ob
->s
.b
.mixed_encoding
= 1;
760 const unsigned char *p
;
761 p
= read_encoded_value_with_base (encoding
, base
, this_fde
->pc_begin
,
764 /* Take care to ignore link-once functions that were removed.
765 In these cases, the function address will be NULL, but if
766 the encoding is smaller than a pointer a true NULL may not
767 be representable. Assume 0 in the representable bits is NULL. */
768 mask
= size_of_encoded_value (encoding
);
769 if (mask
< sizeof (void *))
770 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
774 if ((pc_begin
& mask
) == 0)
780 _Unwind_Ptr pc_range
, pc_end
;
781 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
782 pc_end
= pc_begin
+ pc_range
;
783 if ((!range
[0]) && (!range
[1]))
790 if (pc_begin
< range
[0])
792 if (pc_end
> range
[1])
798 if ((void *) pc_begin
< ob
->pc_begin
)
799 ob
->pc_begin
= (void *) pc_begin
;
807 add_fdes (struct object
*ob
, struct fde_accumulator
*accu
, const fde
*this_fde
)
809 const struct dwarf_cie
*last_cie
= 0;
810 int encoding
= ob
->s
.b
.encoding
;
811 _Unwind_Ptr base
= base_from_object (ob
->s
.b
.encoding
, ob
);
813 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
815 const struct dwarf_cie
*this_cie
;
818 if (this_fde
->CIE_delta
== 0)
821 if (ob
->s
.b
.mixed_encoding
)
823 /* Determine the encoding for this FDE. Note mixed encoded
824 objects for later. */
825 this_cie
= get_cie (this_fde
);
826 if (this_cie
!= last_cie
)
829 encoding
= get_cie_encoding (this_cie
);
830 base
= base_from_object (encoding
, ob
);
834 if (encoding
== DW_EH_PE_absptr
)
837 memcpy (&ptr
, this_fde
->pc_begin
, sizeof (_Unwind_Ptr
));
843 _Unwind_Ptr pc_begin
, mask
;
845 read_encoded_value_with_base (encoding
, base
, this_fde
->pc_begin
,
848 /* Take care to ignore link-once functions that were removed.
849 In these cases, the function address will be NULL, but if
850 the encoding is smaller than a pointer a true NULL may not
851 be representable. Assume 0 in the representable bits is NULL. */
852 mask
= size_of_encoded_value (encoding
);
853 if (mask
< sizeof (void *))
854 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
858 if ((pc_begin
& mask
) == 0)
862 fde_insert (accu
, this_fde
);
866 /* Set up a sorted array of pointers to FDEs for a loaded object. We
867 count up the entries before allocating the array because it's likely to
868 be faster. We can be called multiple times, should we have failed to
869 allocate a sorted fde array on a previous occasion. */
872 init_object (struct object
* ob
)
874 struct fde_accumulator accu
;
877 count
= ob
->s
.b
.count
;
880 if (ob
->s
.b
.from_array
)
882 fde
**p
= ob
->u
.array
;
883 for (count
= 0; *p
; ++p
)
885 size_t cur_count
= classify_object_over_fdes (ob
, *p
, NULL
);
886 if (cur_count
== (size_t) -1)
893 count
= classify_object_over_fdes (ob
, ob
->u
.single
, NULL
);
894 if (count
== (size_t) -1)
896 static const fde terminator
;
899 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
900 ob
->u
.single
= &terminator
;
905 /* The count field we have in the main struct object is somewhat
906 limited, but should suffice for virtually all cases. If the
907 counted value doesn't fit, re-write a zero. The worst that
908 happens is that we re-count next time -- admittedly non-trivial
909 in that this implies some 2M fdes, but at least we function. */
910 ob
->s
.b
.count
= count
;
911 if (ob
->s
.b
.count
!= count
)
915 if (!start_fde_sort (&accu
, count
))
918 if (ob
->s
.b
.from_array
)
921 for (p
= ob
->u
.array
; *p
; ++p
)
922 add_fdes (ob
, &accu
, *p
);
925 add_fdes (ob
, &accu
, ob
->u
.single
);
927 end_fde_sort (ob
, &accu
, count
);
929 /* Save the original fde pointer, since this is the key by which the
930 DSO will deregister the object. */
931 accu
.linear
->orig_data
= ob
->u
.single
;
932 ob
->u
.sort
= accu
.linear
;
934 #ifdef ATOMIC_FDE_FAST_PATH
935 // We must update the sorted bit with an atomic operation
939 __atomic_store (&(ob
->s
.b
), &(tmp
.s
.b
), __ATOMIC_RELEASE
);
#ifdef ATOMIC_FDE_FAST_PATH
/* Get the PC range for lookup */
static void
get_pc_range (const struct object *ob, uintptr_type *range)
{
  // It is safe to cast to non-const object* here as
  // classify_object_over_fdes does not modify ob in query mode.
  struct object *ncob = (struct object *) (uintptr_type) ob;
  range[0] = range[1] = 0;
  if (ob->s.b.sorted)
    {
      classify_object_over_fdes (ncob, ob->u.sort->orig_data, range);
    }
  else if (ob->s.b.from_array)
    {
      fde **p = ob->u.array;
      for (; *p; ++p)
        classify_object_over_fdes (ncob, *p, range);
    }
  else
    {
      classify_object_over_fdes (ncob, ob->u.single, range);
    }
}
#endif
971 /* A linear search through a set of FDEs for the given PC. This is
972 used when there was insufficient memory to allocate and sort an
976 linear_search_fdes (struct object
*ob
, const fde
*this_fde
, void *pc
)
978 const struct dwarf_cie
*last_cie
= 0;
979 int encoding
= ob
->s
.b
.encoding
;
980 _Unwind_Ptr base
= base_from_object (ob
->s
.b
.encoding
, ob
);
982 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
984 const struct dwarf_cie
*this_cie
;
985 _Unwind_Ptr pc_begin
, pc_range
;
988 if (this_fde
->CIE_delta
== 0)
991 if (ob
->s
.b
.mixed_encoding
)
993 /* Determine the encoding for this FDE. Note mixed encoded
994 objects for later. */
995 this_cie
= get_cie (this_fde
);
996 if (this_cie
!= last_cie
)
999 encoding
= get_cie_encoding (this_cie
);
1000 base
= base_from_object (encoding
, ob
);
1004 if (encoding
== DW_EH_PE_absptr
)
1006 const _Unwind_Ptr
*pc_array
= (const _Unwind_Ptr
*) this_fde
->pc_begin
;
1007 pc_begin
= pc_array
[0];
1008 pc_range
= pc_array
[1];
1015 const unsigned char *p
;
1017 p
= read_encoded_value_with_base (encoding
, base
,
1018 this_fde
->pc_begin
, &pc_begin
);
1019 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
1021 /* Take care to ignore link-once functions that were removed.
1022 In these cases, the function address will be NULL, but if
1023 the encoding is smaller than a pointer a true NULL may not
1024 be representable. Assume 0 in the representable bits is NULL. */
1025 mask
= size_of_encoded_value (encoding
);
1026 if (mask
< sizeof (void *))
1027 mask
= (((_Unwind_Ptr
) 1) << (mask
<< 3)) - 1;
1031 if ((pc_begin
& mask
) == 0)
1035 if ((_Unwind_Ptr
) pc
- pc_begin
< pc_range
)
1042 /* Binary search for an FDE containing the given PC. Here are three
1043 implementations of increasing complexity. */
1045 static inline const fde
*
1046 binary_search_unencoded_fdes (struct object
*ob
, void *pc
)
1048 struct fde_vector
*vec
= ob
->u
.sort
;
1051 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
1053 size_t i
= (lo
+ hi
) / 2;
1054 const fde
*const f
= vec
->array
[i
];
1057 memcpy (&pc_begin
, (const void * const *) f
->pc_begin
, sizeof (void *));
1058 memcpy (&pc_range
, (const uaddr
*) f
->pc_begin
+ 1, sizeof (uaddr
));
1062 else if (pc
>= pc_begin
+ pc_range
)
1071 static inline const fde
*
1072 binary_search_single_encoding_fdes (struct object
*ob
, void *pc
)
1074 struct fde_vector
*vec
= ob
->u
.sort
;
1075 int encoding
= ob
->s
.b
.encoding
;
1076 _Unwind_Ptr base
= base_from_object (encoding
, ob
);
1079 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
1081 size_t i
= (lo
+ hi
) / 2;
1082 const fde
*f
= vec
->array
[i
];
1083 _Unwind_Ptr pc_begin
, pc_range
;
1084 const unsigned char *p
;
1086 p
= read_encoded_value_with_base (encoding
, base
, f
->pc_begin
,
1088 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
1090 if ((_Unwind_Ptr
) pc
< pc_begin
)
1092 else if ((_Unwind_Ptr
) pc
>= pc_begin
+ pc_range
)
1101 static inline const fde
*
1102 binary_search_mixed_encoding_fdes (struct object
*ob
, void *pc
)
1104 struct fde_vector
*vec
= ob
->u
.sort
;
1107 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
1109 size_t i
= (lo
+ hi
) / 2;
1110 const fde
*f
= vec
->array
[i
];
1111 _Unwind_Ptr pc_begin
, pc_range
;
1112 const unsigned char *p
;
1115 encoding
= get_fde_encoding (f
);
1116 p
= read_encoded_value_with_base (encoding
,
1117 base_from_object (encoding
, ob
),
1118 f
->pc_begin
, &pc_begin
);
1119 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
1121 if ((_Unwind_Ptr
) pc
< pc_begin
)
1123 else if ((_Unwind_Ptr
) pc
>= pc_begin
+ pc_range
)
1133 search_object (struct object
* ob
, void *pc
)
1135 /* The fast path initializes objects eagerly to avoid locking.
1136 * On the slow path we initialize them now */
1137 #ifndef ATOMIC_FDE_FAST_PATH
1138 /* If the data hasn't been sorted, try to do this now. We may have
1139 more memory available than last time we tried. */
1140 if (! ob
->s
.b
.sorted
)
1144 /* Despite the above comment, the normal reason to get here is
1145 that we've not processed this object before. A quick range
1146 check is in order. */
1147 if (pc
< ob
->pc_begin
)
1154 if (ob
->s
.b
.mixed_encoding
)
1155 return binary_search_mixed_encoding_fdes (ob
, pc
);
1156 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
1157 return binary_search_unencoded_fdes (ob
, pc
);
1159 return binary_search_single_encoding_fdes (ob
, pc
);
1163 /* Long slow laborious linear search, cos we've no memory. */
1164 if (ob
->s
.b
.from_array
)
1167 for (p
= ob
->u
.array
; *p
; p
++)
1169 const fde
*f
= linear_search_fdes (ob
, *p
, pc
);
1176 return linear_search_fdes (ob
, ob
->u
.single
, pc
);
#ifdef ATOMIC_FDE_FAST_PATH

// Check if the object was already initialized
static bool
is_object_initialized (struct object *ob)
{
  // We have to use acquire atomics for the read, which
  // is a bit involved as we read from a bitfield
  struct object tmp;
  __atomic_load (&(ob->s.b), &(tmp.s.b), __ATOMIC_ACQUIRE);
  return tmp.s.b.sorted;
}

#endif
1196 _Unwind_Find_FDE (void *pc
, struct dwarf_eh_bases
*bases
)
1199 const fde
*f
= NULL
;
1201 #ifdef ATOMIC_FDE_FAST_PATH
1202 ob
= btree_lookup (®istered_frames
, (uintptr_type
) pc
);
1206 // Initialize the object lazily
1207 if (!is_object_initialized (ob
))
1209 // Check again under mutex
1210 init_object_mutex_once ();
1211 __gthread_mutex_lock (&object_mutex
);
1213 if (!ob
->s
.b
.sorted
)
1218 __gthread_mutex_unlock (&object_mutex
);
1221 f
= search_object (ob
, pc
);
1224 init_object_mutex_once ();
1225 __gthread_mutex_lock (&object_mutex
);
1227 /* Linear search through the classified objects, to find the one
1228 containing the pc. Note that pc_begin is sorted descending, and
1229 we expect objects to be non-overlapping. */
1230 for (ob
= seen_objects
; ob
; ob
= ob
->next
)
1231 if (pc
>= ob
->pc_begin
)
1233 f
= search_object (ob
, pc
);
1239 /* Classify and search the objects we've not yet processed. */
1240 while ((ob
= unseen_objects
))
1244 unseen_objects
= ob
->next
;
1245 f
= search_object (ob
, pc
);
1247 /* Insert the object into the classified list. */
1248 for (p
= &seen_objects
; *p
; p
= &(*p
)->next
)
1249 if ((*p
)->pc_begin
< ob
->pc_begin
)
1259 __gthread_mutex_unlock (&object_mutex
);
1267 bases
->tbase
= ob
->tbase
;
1268 bases
->dbase
= ob
->dbase
;
1270 encoding
= ob
->s
.b
.encoding
;
1271 if (ob
->s
.b
.mixed_encoding
)
1272 encoding
= get_fde_encoding (f
);
1273 read_encoded_value_with_base (encoding
, base_from_object (encoding
, ob
),
1274 f
->pc_begin
, &func
);
1275 bases
->func
= (void *) func
;