c++: partially inst requires-expr in noexcept-spec [PR101043]
[official-gcc.git] / libgcc / unwind-dw2-fde.c
blob51129906fac0aae8679b5d840d180c98b9149030
1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997-2023 Free Software Foundation, Inc.
3 Contributed by Jason Merrill <jason@cygnus.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 #ifndef _Unwind_Find_FDE
27 #include "tconfig.h"
28 #include "tsystem.h"
29 #include "coretypes.h"
30 #include "tm.h"
31 #include "libgcc_tm.h"
32 #include "dwarf2.h"
33 #include "unwind.h"
34 #define NO_BASE_OF_ENCODED_VALUE
35 #include "unwind-pe.h"
36 #include "unwind-dw2-fde.h"
37 #include "gthr.h"
38 #else
39 #if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
40 && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
41 #define ATOMIC_FDE_FAST_PATH 1
42 #endif
43 #endif
45 typedef __UINTPTR_TYPE__ uintptr_type;
47 #ifdef ATOMIC_FDE_FAST_PATH
48 #include "unwind-dw2-btree.h"
50 static struct btree registered_frames;
51 static bool in_shutdown;
53 static void
54 release_registered_frames (void) __attribute__ ((destructor));
55 static void
56 release_registered_frames (void)
58 /* Release the b-tree and all frames. Frame releases that happen later are
59 * silently ignored */
60 btree_destroy (&registered_frames);
61 in_shutdown = true;
64 static void
65 get_pc_range (const struct object *ob, uintptr_type *range);
67 #else
68 /* Without fast path frame deregistration must always succeed. */
69 static const int in_shutdown = 0;
71 /* The unseen_objects list contains objects that have been registered
72 but not yet categorized in any way. The seen_objects list has had
73 its pc_begin and count fields initialized at minimum, and is sorted
74 by decreasing value of pc_begin. */
75 static struct object *unseen_objects;
76 static struct object *seen_objects;
77 #endif
79 #ifdef __GTHREAD_MUTEX_INIT
80 static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
81 #define init_object_mutex_once()
82 #else
83 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
84 static __gthread_mutex_t object_mutex;
86 static void
87 init_object_mutex (void)
89 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
92 static void
93 init_object_mutex_once (void)
95 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
96 __gthread_once (&once, init_object_mutex);
98 #else
99 /* ??? Several targets include this file with stubbing parts of gthr.h
100 and expect no locking to be done. */
101 #define init_object_mutex_once()
102 static __gthread_mutex_t object_mutex;
103 #endif
104 #endif
106 /* Called from crtbegin.o to register the unwind info for an object. */
108 void
109 __register_frame_info_bases (const void *begin, struct object *ob,
110 void *tbase, void *dbase)
112 /* If .eh_frame is empty, don't register at all. */
113 if ((const uword *) begin == 0 || *(const uword *) begin == 0)
114 return;
116 ob->pc_begin = (void *)-1;
117 ob->tbase = tbase;
118 ob->dbase = dbase;
119 ob->u.single = begin;
120 ob->s.i = 0;
121 ob->s.b.encoding = DW_EH_PE_omit;
122 #ifdef DWARF2_OBJECT_END_PTR_EXTENSION
123 ob->fde_end = NULL;
124 #endif
126 #ifdef ATOMIC_FDE_FAST_PATH
127 // Register the object itself to know the base pointer on deregistration.
128 btree_insert (&registered_frames, (uintptr_type) begin, 1, ob);
130 // Register the frame in the b-tree
131 uintptr_type range[2];
132 get_pc_range (ob, range);
133 btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
134 #else
135 init_object_mutex_once ();
136 __gthread_mutex_lock (&object_mutex);
138 ob->next = unseen_objects;
139 unseen_objects = ob;
141 __gthread_mutex_unlock (&object_mutex);
142 #endif
/* Register unwind info for BEGIN with no text/data bases.  */
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
151 void
152 __register_frame (void *begin)
154 struct object *ob;
156 /* If .eh_frame is empty, don't register at all. */
157 if (*(uword *) begin == 0)
158 return;
160 ob = malloc (sizeof (struct object));
161 __register_frame_info (begin, ob);
164 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
165 for different translation units. Called from the file generated by
166 collect2. */
168 void
169 __register_frame_info_table_bases (void *begin, struct object *ob,
170 void *tbase, void *dbase)
172 ob->pc_begin = (void *)-1;
173 ob->tbase = tbase;
174 ob->dbase = dbase;
175 ob->u.array = begin;
176 ob->s.i = 0;
177 ob->s.b.from_array = 1;
178 ob->s.b.encoding = DW_EH_PE_omit;
180 #ifdef ATOMIC_FDE_FAST_PATH
181 // Register the object itself to know the base pointer on deregistration.
182 btree_insert (&registered_frames, (uintptr_type) begin, 1, ob);
184 // Register the frame in the b-tree
185 uintptr_type range[2];
186 get_pc_range (ob, range);
187 btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
188 #else
189 init_object_mutex_once ();
190 __gthread_mutex_lock (&object_mutex);
192 ob->next = unseen_objects;
193 unseen_objects = ob;
195 __gthread_mutex_unlock (&object_mutex);
196 #endif
/* Table variant of __register_frame_info with no text/data bases.  */
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
205 void
206 __register_frame_table (void *begin)
208 struct object *ob = malloc (sizeof (struct object));
209 __register_frame_info_table (begin, ob);
212 /* Called from crtbegin.o to deregister the unwind info for an object. */
213 /* ??? Glibc has for a while now exported __register_frame_info and
214 __deregister_frame_info. If we call __register_frame_info_bases
215 from crtbegin (wherein it is declared weak), and this object does
216 not get pulled from libgcc.a for other reasons, then the
217 invocation of __deregister_frame_info will be resolved from glibc.
218 Since the registration did not happen there, we'll die.
220 Therefore, declare a new deregistration entry point that does the
221 exact same thing, but will resolve to the same library as
222 implements __register_frame_info_bases. */
224 void *
225 __deregister_frame_info_bases (const void *begin)
227 struct object *ob = 0;
229 /* If .eh_frame is empty, we haven't registered. */
230 if ((const uword *) begin == 0 || *(const uword *) begin == 0)
231 return ob;
233 #ifdef ATOMIC_FDE_FAST_PATH
234 // Find the originally registered object to get the base pointer.
235 ob = btree_remove (&registered_frames, (uintptr_type) begin);
237 // Remove the corresponding PC range.
238 if (ob)
240 uintptr_type range[2];
241 get_pc_range (ob, range);
242 if (range[0] != range[1])
243 btree_remove (&registered_frames, range[0]);
246 // Deallocate the sort array if any.
247 if (ob && ob->s.b.sorted)
249 free (ob->u.sort);
251 #else
252 init_object_mutex_once ();
253 __gthread_mutex_lock (&object_mutex);
255 struct object **p;
256 for (p = &unseen_objects; *p ; p = &(*p)->next)
257 if ((*p)->u.single == begin)
259 ob = *p;
260 *p = ob->next;
261 goto out;
264 for (p = &seen_objects; *p ; p = &(*p)->next)
265 if ((*p)->s.b.sorted)
267 if ((*p)->u.sort->orig_data == begin)
269 ob = *p;
270 *p = ob->next;
271 free (ob->u.sort);
272 goto out;
275 else
277 if ((*p)->u.single == begin)
279 ob = *p;
280 *p = ob->next;
281 goto out;
285 out:
286 __gthread_mutex_unlock (&object_mutex);
287 #endif
289 // If we didn't find anything in the lookup data structures then they
290 // were either already destroyed or we tried to remove an empty range.
291 gcc_assert (in_shutdown || ob);
292 return (void *) ob;
/* Public deregistration entry point; see the comment above about glibc.  */
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
301 void
302 __deregister_frame (void *begin)
304 /* If .eh_frame is empty, we haven't registered. */
305 if (*(uword *) begin != 0)
306 free (__deregister_frame_info (begin));
310 /* Like base_of_encoded_value, but take the base from a struct object
311 instead of an _Unwind_Context. */
313 static _Unwind_Ptr
314 base_from_object (unsigned char encoding, const struct object *ob)
316 if (encoding == DW_EH_PE_omit)
317 return 0;
319 switch (encoding & 0x70)
321 case DW_EH_PE_absptr:
322 case DW_EH_PE_pcrel:
323 case DW_EH_PE_aligned:
324 return 0;
326 case DW_EH_PE_textrel:
327 return (_Unwind_Ptr) ob->tbase;
328 case DW_EH_PE_datarel:
329 return (_Unwind_Ptr) ob->dbase;
330 default:
331 gcc_unreachable ();
335 /* Return the FDE pointer encoding from the CIE. */
336 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
338 static int
339 get_cie_encoding (const struct dwarf_cie *cie)
341 const unsigned char *aug, *p;
342 _Unwind_Ptr dummy;
343 _uleb128_t utmp;
344 _sleb128_t stmp;
346 aug = cie->augmentation;
347 p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string. */
348 if (__builtin_expect (cie->version >= 4, 0))
350 if (p[0] != sizeof (void *) || p[1] != 0)
351 return DW_EH_PE_omit; /* We are not prepared to handle unexpected
352 address sizes or segment selectors. */
353 p += 2; /* Skip address size and segment size. */
356 if (aug[0] != 'z')
357 return DW_EH_PE_absptr;
359 p = read_uleb128 (p, &utmp); /* Skip code alignment. */
360 p = read_sleb128 (p, &stmp); /* Skip data alignment. */
361 if (cie->version == 1) /* Skip return address column. */
362 p++;
363 else
364 p = read_uleb128 (p, &utmp);
366 aug++; /* Skip 'z' */
367 p = read_uleb128 (p, &utmp); /* Skip augmentation length. */
368 while (1)
370 /* This is what we're looking for. */
371 if (*aug == 'R')
372 return *p;
373 /* Personality encoding and pointer. */
374 else if (*aug == 'P')
376 /* ??? Avoid dereferencing indirect pointers, since we're
377 faking the base address. Gotta keep DW_EH_PE_aligned
378 intact, however. */
379 p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
381 /* LSDA encoding. */
382 else if (*aug == 'L')
383 p++;
384 /* aarch64 b-key pointer authentication. */
385 else if (*aug == 'B')
386 p++;
387 /* Otherwise end of string, or unknown augmentation. */
388 else
389 return DW_EH_PE_absptr;
390 aug++;
/* Return the pointer encoding of F, taken from its CIE.  */
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
401 /* Sorting an array of FDEs by address.
402 (Ideally we would have the linker sort the FDEs so we don't have to do
403 it at run time. But the linkers are not yet prepared for this.) */
405 /* Comparison routines. Three variants of increasing complexity. */
407 static int
408 fde_unencoded_compare (struct object *ob __attribute__((unused)),
409 const fde *x, const fde *y)
411 _Unwind_Ptr x_ptr, y_ptr;
412 memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
413 memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));
415 if (x_ptr > y_ptr)
416 return 1;
417 if (x_ptr < y_ptr)
418 return -1;
419 return 0;
422 static int
423 fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
425 _Unwind_Ptr base, x_ptr, y_ptr;
427 base = base_from_object (ob->s.b.encoding, ob);
428 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
429 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
431 if (x_ptr > y_ptr)
432 return 1;
433 if (x_ptr < y_ptr)
434 return -1;
435 return 0;
438 static int
439 fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
441 int x_encoding, y_encoding;
442 _Unwind_Ptr x_ptr, y_ptr;
444 x_encoding = get_fde_encoding (x);
445 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
446 x->pc_begin, &x_ptr);
448 y_encoding = get_fde_encoding (y);
449 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
450 y->pc_begin, &y_ptr);
452 if (x_ptr > y_ptr)
453 return 1;
454 if (x_ptr < y_ptr)
455 return -1;
456 return 0;
459 typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);
461 // The extractor functions compute the pointer values for a block of
462 // fdes. The block processing hides the call overhead.
464 static void
465 fde_unencoded_extract (struct object *ob __attribute__ ((unused)),
466 _Unwind_Ptr *target, const fde **x, int count)
468 for (int index = 0; index < count; ++index)
469 memcpy (target + index, x[index]->pc_begin, sizeof (_Unwind_Ptr));
472 static void
473 fde_single_encoding_extract (struct object *ob, _Unwind_Ptr *target,
474 const fde **x, int count)
476 _Unwind_Ptr base;
478 base = base_from_object (ob->s.b.encoding, ob);
479 for (int index = 0; index < count; ++index)
480 read_encoded_value_with_base (ob->s.b.encoding, base, x[index]->pc_begin,
481 target + index);
484 static void
485 fde_mixed_encoding_extract (struct object *ob, _Unwind_Ptr *target,
486 const fde **x, int count)
488 for (int index = 0; index < count; ++index)
490 int encoding = get_fde_encoding (x[index]);
491 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
492 x[index]->pc_begin, target + index);
496 typedef void (*fde_extractor_t) (struct object *, _Unwind_Ptr *, const fde **,
497 int);
// Data is sorted using radix sort if possible, using a temporary
// auxiliary data structure of the same size as the input.  When running
// out of memory do in-place heap sort.
/* Accumulator for the sort: LINEAR holds the FDE pointers in insertion
   order; AUX is the scratch vector for the radix sort, or NULL when its
   allocation failed and heap sort must be used instead.  */
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *aux;
};
509 static inline int
510 start_fde_sort (struct fde_accumulator *accu, size_t count)
512 size_t size;
513 if (! count)
514 return 0;
516 size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
517 if ((accu->linear = malloc (size)))
519 accu->linear->count = 0;
520 if ((accu->aux = malloc (size)))
521 accu->aux->count = 0;
522 return 1;
524 else
525 return 0;
528 static inline void
529 fde_insert (struct fde_accumulator *accu, const fde *this_fde)
531 if (accu->linear)
532 accu->linear->array[accu->linear->count++] = this_fde;
535 #define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)
537 /* Convert a semi-heap to a heap. A semi-heap is a heap except possibly
538 for the first (root) node; push it down to its rightful place. */
540 static void
541 frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
542 int lo, int hi)
544 int i, j;
546 for (i = lo, j = 2*i+1;
547 j < hi;
548 j = 2*i+1)
550 if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
551 ++j;
553 if (fde_compare (ob, a[i], a[j]) < 0)
555 SWAP (a[i], a[j]);
556 i = j;
558 else
559 break;
563 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
564 use a name that does not conflict. */
566 static void
567 frame_heapsort (struct object *ob, fde_compare_t fde_compare,
568 struct fde_vector *erratic)
570 /* For a description of this algorithm, see:
571 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
572 p. 60-61. */
573 const fde ** a = erratic->array;
574 /* A portion of the array is called a "heap" if for all i>=0:
575 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
576 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
577 size_t n = erratic->count;
578 int m;
580 /* Expand our heap incrementally from the end of the array, heapifying
581 each resulting semi-heap as we go. After each step, a[m] is the top
582 of a heap. */
583 for (m = n/2-1; m >= 0; --m)
584 frame_downheap (ob, fde_compare, a, m, n);
586 /* Shrink our heap incrementally from the end of the array, first
587 swapping out the largest element a[0] and then re-heapifying the
588 resulting semi-heap. After each step, a[0..m) is a heap. */
589 for (m = n-1; m >= 1; --m)
591 SWAP (a[0], a[m]);
592 frame_downheap (ob, fde_compare, a, 0, m);
594 #undef SWAP
597 // Radix sort data in V1 using V2 as aux memory. Runtime O(n).
598 static inline void
599 fde_radixsort (struct object *ob, fde_extractor_t fde_extractor,
600 struct fde_vector *v1, struct fde_vector *v2)
602 #define FANOUTBITS 8
603 #define FANOUT (1 << FANOUTBITS)
604 #define BLOCKSIZE 128
605 const unsigned rounds
606 = (__CHAR_BIT__ * sizeof (_Unwind_Ptr) + FANOUTBITS - 1) / FANOUTBITS;
607 const fde **a1 = v1->array, **a2 = v2->array;
608 _Unwind_Ptr ptrs[BLOCKSIZE + 1];
609 unsigned n = v1->count;
610 for (unsigned round = 0; round != rounds; ++round)
612 unsigned counts[FANOUT] = {0};
613 unsigned violations = 0;
615 // Count the number of elements per bucket and check if we are already
616 // sorted.
617 _Unwind_Ptr last = 0;
618 for (unsigned i = 0; i < n;)
620 unsigned chunk = ((n - i) <= BLOCKSIZE) ? (n - i) : BLOCKSIZE;
621 fde_extractor (ob, ptrs + 1, a1 + i, chunk);
622 ptrs[0] = last;
623 for (unsigned j = 0; j < chunk; ++j)
625 unsigned b = (ptrs[j + 1] >> (round * FANOUTBITS)) & (FANOUT - 1);
626 counts[b]++;
627 // Use summation instead of an if to eliminate branches.
628 violations += ptrs[j + 1] < ptrs[j];
630 i += chunk;
631 last = ptrs[chunk];
634 // Stop if we are already sorted.
635 if (!violations)
637 break;
640 // Compute the prefix sum.
641 unsigned sum = 0;
642 for (unsigned i = 0; i != FANOUT; ++i)
644 unsigned s = sum;
645 sum += counts[i];
646 counts[i] = s;
649 // Place all elements.
650 for (unsigned i = 0; i < n;)
652 unsigned chunk = ((n - i) <= BLOCKSIZE) ? (n - i) : BLOCKSIZE;
653 fde_extractor (ob, ptrs, a1 + i, chunk);
654 for (unsigned j = 0; j < chunk; ++j)
656 unsigned b = (ptrs[j] >> (round * FANOUTBITS)) & (FANOUT - 1);
657 a2[counts[b]++] = a1[i + j];
659 i += chunk;
662 // Swap a1 and a2.
663 const fde **tmp = a1;
664 a1 = a2;
665 a2 = tmp;
667 #undef BLOCKSIZE
668 #undef FANOUT
669 #undef FANOUTBITS
671 // The data is in a1 now, move in place if needed.
672 if (a1 != v1->array)
673 memcpy (v1->array, a1, sizeof (const fde *) * n);
676 static inline void
677 end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
679 gcc_assert (!accu->linear || accu->linear->count == count);
681 if (accu->aux)
683 fde_extractor_t fde_extractor;
684 if (ob->s.b.mixed_encoding)
685 fde_extractor = fde_mixed_encoding_extract;
686 else if (ob->s.b.encoding == DW_EH_PE_absptr)
687 fde_extractor = fde_unencoded_extract;
688 else
689 fde_extractor = fde_single_encoding_extract;
691 fde_radixsort (ob, fde_extractor, accu->linear, accu->aux);
692 free (accu->aux);
694 else
696 fde_compare_t fde_compare;
697 if (ob->s.b.mixed_encoding)
698 fde_compare = fde_mixed_encoding_compare;
699 else if (ob->s.b.encoding == DW_EH_PE_absptr)
700 fde_compare = fde_unencoded_compare;
701 else
702 fde_compare = fde_single_encoding_compare;
704 /* We've not managed to malloc an aux array,
705 so heap sort in the linear one. */
706 frame_heapsort (ob, fde_compare, accu->linear);
710 /* Inspect the fde array beginning at this_fde. This
711 function can be used either in query mode (RANGE is
712 not null, OB is const), or in update mode (RANGE is
713 null, OB is modified). In query mode the function computes
714 the range of PC values and stores it in RANGE. In
715 update mode it updates encoding, mixed_encoding, and pc_begin
716 for OB. Return the number of fdes encountered along the way. */
718 static size_t
719 classify_object_over_fdes (struct object *ob, const fde *this_fde,
720 uintptr_type *range)
722 const struct dwarf_cie *last_cie = 0;
723 size_t count = 0;
724 int encoding = DW_EH_PE_absptr;
725 _Unwind_Ptr base = 0;
727 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
729 const struct dwarf_cie *this_cie;
730 _Unwind_Ptr mask, pc_begin;
732 /* Skip CIEs. */
733 if (this_fde->CIE_delta == 0)
734 continue;
736 /* Determine the encoding for this FDE. Note mixed encoded
737 objects for later. */
738 this_cie = get_cie (this_fde);
739 if (this_cie != last_cie)
741 last_cie = this_cie;
742 encoding = get_cie_encoding (this_cie);
743 if (encoding == DW_EH_PE_omit)
744 return -1;
745 base = base_from_object (encoding, ob);
746 if (!range)
748 if (ob->s.b.encoding == DW_EH_PE_omit)
749 ob->s.b.encoding = encoding;
750 else if (ob->s.b.encoding != encoding)
751 ob->s.b.mixed_encoding = 1;
755 const unsigned char *p;
756 p = read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
757 &pc_begin);
759 /* Take care to ignore link-once functions that were removed.
760 In these cases, the function address will be NULL, but if
761 the encoding is smaller than a pointer a true NULL may not
762 be representable. Assume 0 in the representable bits is NULL. */
763 mask = size_of_encoded_value (encoding);
764 if (mask < sizeof (void *))
765 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
766 else
767 mask = -1;
769 if ((pc_begin & mask) == 0)
770 continue;
772 count += 1;
773 if (range)
775 _Unwind_Ptr pc_range, pc_end;
776 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
777 pc_end = pc_begin + pc_range;
778 if ((!range[0]) && (!range[1]))
780 range[0] = pc_begin;
781 range[1] = pc_end;
783 else
785 if (pc_begin < range[0])
786 range[0] = pc_begin;
787 if (pc_end > range[1])
788 range[1] = pc_end;
791 else
793 if ((void *) pc_begin < ob->pc_begin)
794 ob->pc_begin = (void *) pc_begin;
798 return count;
801 static void
802 add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
804 const struct dwarf_cie *last_cie = 0;
805 int encoding = ob->s.b.encoding;
806 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
808 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
810 const struct dwarf_cie *this_cie;
812 /* Skip CIEs. */
813 if (this_fde->CIE_delta == 0)
814 continue;
816 if (ob->s.b.mixed_encoding)
818 /* Determine the encoding for this FDE. Note mixed encoded
819 objects for later. */
820 this_cie = get_cie (this_fde);
821 if (this_cie != last_cie)
823 last_cie = this_cie;
824 encoding = get_cie_encoding (this_cie);
825 base = base_from_object (encoding, ob);
829 if (encoding == DW_EH_PE_absptr)
831 _Unwind_Ptr ptr;
832 memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
833 if (ptr == 0)
834 continue;
836 else
838 _Unwind_Ptr pc_begin, mask;
840 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
841 &pc_begin);
843 /* Take care to ignore link-once functions that were removed.
844 In these cases, the function address will be NULL, but if
845 the encoding is smaller than a pointer a true NULL may not
846 be representable. Assume 0 in the representable bits is NULL. */
847 mask = size_of_encoded_value (encoding);
848 if (mask < sizeof (void *))
849 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
850 else
851 mask = -1;
853 if ((pc_begin & mask) == 0)
854 continue;
857 fde_insert (accu, this_fde);
861 /* Set up a sorted array of pointers to FDEs for a loaded object. We
862 count up the entries before allocating the array because it's likely to
863 be faster. We can be called multiple times, should we have failed to
864 allocate a sorted fde array on a previous occasion. */
866 static inline void
867 init_object (struct object* ob)
869 struct fde_accumulator accu;
870 size_t count;
872 count = ob->s.b.count;
873 if (count == 0)
875 if (ob->s.b.from_array)
877 fde **p = ob->u.array;
878 for (count = 0; *p; ++p)
880 size_t cur_count = classify_object_over_fdes (ob, *p, NULL);
881 if (cur_count == (size_t) -1)
882 goto unhandled_fdes;
883 count += cur_count;
886 else
888 count = classify_object_over_fdes (ob, ob->u.single, NULL);
889 if (count == (size_t) -1)
891 static const fde terminator;
892 unhandled_fdes:
893 ob->s.i = 0;
894 ob->s.b.encoding = DW_EH_PE_omit;
895 ob->u.single = &terminator;
896 return;
900 /* The count field we have in the main struct object is somewhat
901 limited, but should suffice for virtually all cases. If the
902 counted value doesn't fit, re-write a zero. The worst that
903 happens is that we re-count next time -- admittedly non-trivial
904 in that this implies some 2M fdes, but at least we function. */
905 ob->s.b.count = count;
906 if (ob->s.b.count != count)
907 ob->s.b.count = 0;
910 if (!start_fde_sort (&accu, count))
911 return;
913 if (ob->s.b.from_array)
915 fde **p;
916 for (p = ob->u.array; *p; ++p)
917 add_fdes (ob, &accu, *p);
919 else
920 add_fdes (ob, &accu, ob->u.single);
922 end_fde_sort (ob, &accu, count);
924 /* Save the original fde pointer, since this is the key by which the
925 DSO will deregister the object. */
926 accu.linear->orig_data = ob->u.single;
927 ob->u.sort = accu.linear;
929 #ifdef ATOMIC_FDE_FAST_PATH
930 // We must update the sorted bit with an atomic operation
931 struct object tmp;
932 tmp.s.b = ob->s.b;
933 tmp.s.b.sorted = 1;
934 __atomic_store (&(ob->s.b), &(tmp.s.b), __ATOMIC_RELEASE);
935 #else
936 ob->s.b.sorted = 1;
937 #endif
#ifdef ATOMIC_FDE_FAST_PATH
/* Get the PC range for lookup.  */
static void
get_pc_range (const struct object *ob, uintptr_type *range)
{
  // It is safe to cast to non-const object* here as
  // classify_object_over_fdes does not modify ob in query mode.
  struct object *ncob = (struct object *) (uintptr_type) ob;
  range[0] = range[1] = 0;
  if (ob->s.b.sorted)
    classify_object_over_fdes (ncob, ob->u.sort->orig_data, range);
  else if (ob->s.b.from_array)
    {
      fde **p = ob->u.array;
      for (; *p; ++p)
	classify_object_over_fdes (ncob, *p, range);
    }
  else
    classify_object_over_fdes (ncob, ob->u.single, range);
}
#endif
966 /* A linear search through a set of FDEs for the given PC. This is
967 used when there was insufficient memory to allocate and sort an
968 array. */
970 static const fde *
971 linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
973 const struct dwarf_cie *last_cie = 0;
974 int encoding = ob->s.b.encoding;
975 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
977 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
979 const struct dwarf_cie *this_cie;
980 _Unwind_Ptr pc_begin, pc_range;
982 /* Skip CIEs. */
983 if (this_fde->CIE_delta == 0)
984 continue;
986 if (ob->s.b.mixed_encoding)
988 /* Determine the encoding for this FDE. Note mixed encoded
989 objects for later. */
990 this_cie = get_cie (this_fde);
991 if (this_cie != last_cie)
993 last_cie = this_cie;
994 encoding = get_cie_encoding (this_cie);
995 base = base_from_object (encoding, ob);
999 if (encoding == DW_EH_PE_absptr)
1001 const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
1002 pc_begin = pc_array[0];
1003 pc_range = pc_array[1];
1004 if (pc_begin == 0)
1005 continue;
1007 else
1009 _Unwind_Ptr mask;
1010 const unsigned char *p;
1012 p = read_encoded_value_with_base (encoding, base,
1013 this_fde->pc_begin, &pc_begin);
1014 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
1016 /* Take care to ignore link-once functions that were removed.
1017 In these cases, the function address will be NULL, but if
1018 the encoding is smaller than a pointer a true NULL may not
1019 be representable. Assume 0 in the representable bits is NULL. */
1020 mask = size_of_encoded_value (encoding);
1021 if (mask < sizeof (void *))
1022 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
1023 else
1024 mask = -1;
1026 if ((pc_begin & mask) == 0)
1027 continue;
1030 if ((_Unwind_Ptr) pc - pc_begin < pc_range)
1031 return this_fde;
1034 return NULL;
1037 /* Binary search for an FDE containing the given PC. Here are three
1038 implementations of increasing complexity. */
1040 static inline const fde *
1041 binary_search_unencoded_fdes (struct object *ob, void *pc)
1043 struct fde_vector *vec = ob->u.sort;
1044 size_t lo, hi;
1046 for (lo = 0, hi = vec->count; lo < hi; )
1048 size_t i = (lo + hi) / 2;
1049 const fde *const f = vec->array[i];
1050 void *pc_begin;
1051 uaddr pc_range;
1052 memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
1053 memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));
1055 if (pc < pc_begin)
1056 hi = i;
1057 else if (pc >= pc_begin + pc_range)
1058 lo = i + 1;
1059 else
1060 return f;
1063 return NULL;
1066 static inline const fde *
1067 binary_search_single_encoding_fdes (struct object *ob, void *pc)
1069 struct fde_vector *vec = ob->u.sort;
1070 int encoding = ob->s.b.encoding;
1071 _Unwind_Ptr base = base_from_object (encoding, ob);
1072 size_t lo, hi;
1074 for (lo = 0, hi = vec->count; lo < hi; )
1076 size_t i = (lo + hi) / 2;
1077 const fde *f = vec->array[i];
1078 _Unwind_Ptr pc_begin, pc_range;
1079 const unsigned char *p;
1081 p = read_encoded_value_with_base (encoding, base, f->pc_begin,
1082 &pc_begin);
1083 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
1085 if ((_Unwind_Ptr) pc < pc_begin)
1086 hi = i;
1087 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
1088 lo = i + 1;
1089 else
1090 return f;
1093 return NULL;
1096 static inline const fde *
1097 binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
1099 struct fde_vector *vec = ob->u.sort;
1100 size_t lo, hi;
1102 for (lo = 0, hi = vec->count; lo < hi; )
1104 size_t i = (lo + hi) / 2;
1105 const fde *f = vec->array[i];
1106 _Unwind_Ptr pc_begin, pc_range;
1107 const unsigned char *p;
1108 int encoding;
1110 encoding = get_fde_encoding (f);
1111 p = read_encoded_value_with_base (encoding,
1112 base_from_object (encoding, ob),
1113 f->pc_begin, &pc_begin);
1114 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
1116 if ((_Unwind_Ptr) pc < pc_begin)
1117 hi = i;
1118 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
1119 lo = i + 1;
1120 else
1121 return f;
1124 return NULL;
1127 static const fde *
1128 search_object (struct object* ob, void *pc)
1130 /* The fast path initializes objects eagerly to avoid locking.
1131 * On the slow path we initialize them now */
1132 #ifndef ATOMIC_FDE_FAST_PATH
1133 /* If the data hasn't been sorted, try to do this now. We may have
1134 more memory available than last time we tried. */
1135 if (! ob->s.b.sorted)
1137 init_object (ob);
1139 /* Despite the above comment, the normal reason to get here is
1140 that we've not processed this object before. A quick range
1141 check is in order. */
1142 if (pc < ob->pc_begin)
1143 return NULL;
1145 #endif
1147 if (ob->s.b.sorted)
1149 if (ob->s.b.mixed_encoding)
1150 return binary_search_mixed_encoding_fdes (ob, pc);
1151 else if (ob->s.b.encoding == DW_EH_PE_absptr)
1152 return binary_search_unencoded_fdes (ob, pc);
1153 else
1154 return binary_search_single_encoding_fdes (ob, pc);
1156 else
1158 /* Long slow laborious linear search, cos we've no memory. */
1159 if (ob->s.b.from_array)
1161 fde **p;
1162 for (p = ob->u.array; *p ; p++)
1164 const fde *f = linear_search_fdes (ob, *p, pc);
1165 if (f)
1166 return f;
1168 return NULL;
1170 else
1171 return linear_search_fdes (ob, ob->u.single, pc);
#ifdef ATOMIC_FDE_FAST_PATH

// Check if the object was already initialized.
static inline bool
is_object_initialized (struct object *ob)
{
  // We have to use acquire atomics for the read, which
  // is a bit involved as we read from a bitfield.
  struct object tmp;
  __atomic_load (&(ob->s.b), &(tmp.s.b), __ATOMIC_ACQUIRE);
  return tmp.s.b.sorted;
}

#endif
1190 const fde *
1191 _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
1193 struct object *ob;
1194 const fde *f = NULL;
1196 #ifdef ATOMIC_FDE_FAST_PATH
1197 ob = btree_lookup (&registered_frames, (uintptr_type) pc);
1198 if (!ob)
1199 return NULL;
1201 // Initialize the object lazily
1202 if (!is_object_initialized (ob))
1204 // Check again under mutex
1205 init_object_mutex_once ();
1206 __gthread_mutex_lock (&object_mutex);
1208 if (!ob->s.b.sorted)
1210 init_object (ob);
1213 __gthread_mutex_unlock (&object_mutex);
1216 f = search_object (ob, pc);
1217 #else
1219 init_object_mutex_once ();
1220 __gthread_mutex_lock (&object_mutex);
1222 /* Linear search through the classified objects, to find the one
1223 containing the pc. Note that pc_begin is sorted descending, and
1224 we expect objects to be non-overlapping. */
1225 for (ob = seen_objects; ob; ob = ob->next)
1226 if (pc >= ob->pc_begin)
1228 f = search_object (ob, pc);
1229 if (f)
1230 goto fini;
1231 break;
1234 /* Classify and search the objects we've not yet processed. */
1235 while ((ob = unseen_objects))
1237 struct object **p;
1239 unseen_objects = ob->next;
1240 f = search_object (ob, pc);
1242 /* Insert the object into the classified list. */
1243 for (p = &seen_objects; *p ; p = &(*p)->next)
1244 if ((*p)->pc_begin < ob->pc_begin)
1245 break;
1246 ob->next = *p;
1247 *p = ob;
1249 if (f)
1250 goto fini;
1253 fini:
1254 __gthread_mutex_unlock (&object_mutex);
1255 #endif
1257 if (f)
1259 int encoding;
1260 _Unwind_Ptr func;
1262 bases->tbase = ob->tbase;
1263 bases->dbase = ob->dbase;
1265 encoding = ob->s.b.encoding;
1266 if (ob->s.b.mixed_encoding)
1267 encoding = get_fde_encoding (f);
1268 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
1269 f->pc_begin, &func);
1270 bases->func = (void *) func;
1273 return f;