/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2022 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#else
#if (defined(__GTHREAD_MUTEX_INIT) || defined(__GTHREAD_MUTEX_INIT_FUNCTION)) \
    && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
#define ATOMIC_FDE_FAST_PATH 1
#endif
#endif

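/* With ATOMIC_FDE_FAST_PATH, registered frames are kept in a b-tree keyed
   by PC range instead of the mutex-protected object lists below.  As the
   condition above shows, this requires working gthread mutex support and
   a 4-byte atomic compare-and-swap.  */
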
typedef __UINTPTR_TYPE__ uintptr_type;

#ifdef ATOMIC_FDE_FAST_PATH
#include "unwind-dw2-btree.h"

static struct btree registered_frames;
static bool in_shutdown;

static void
release_registered_frames (void) __attribute__ ((destructor));
static void
release_registered_frames (void)
{
  /* Release the b-tree and all frames.  Frame releases that happen later
     are silently ignored.  */
  btree_destroy (&registered_frames);
  in_shutdown = true;
}

static void
get_pc_range (const struct object *ob, uintptr_type *range);
static void
init_object (struct object *ob);

#else

/* Without fast path frame deregistration must always succeed.  */
static const int in_shutdown = 0;

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#define init_object_mutex_once()
#else
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static __gthread_mutex_t object_mutex;

static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* ???  Several targets include this file with stubbing parts of gthr.h
   and expect no locking to be done.  */
#define init_object_mutex_once()
static __gthread_mutex_t object_mutex;
#endif
#endif
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

#ifdef ATOMIC_FDE_FAST_PATH
  // Initialize eagerly to avoid locking later
  init_object (ob);

  // And register the frame
  uintptr_type range[2];
  get_pc_range (ob, range);
  btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
#else
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
#endif
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}

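/* A sketch of typical use outside crtbegin: a JIT that emits its own
   .eh_frame data can hand it to the unwinder through these entry points.
   Illustrative only; EH_FRAME here stands for a buffer the caller filled
   with CIE/FDE records followed by a zero terminator word.

     void *eh_frame = ...;             // JIT-emitted unwind data
     __register_frame (eh_frame);      // make it visible to the unwinder
     ...                               // run the JITed code
     __deregister_frame (eh_frame);    // and release the bookkeeping  */
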
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

#ifdef ATOMIC_FDE_FAST_PATH
  // Initialize eagerly to avoid locking later
  init_object (ob);

  // And register the frame
  uintptr_type range[2];
  get_pc_range (ob, range);
  btree_insert (&registered_frames, range[0], range[1] - range[0], ob);
#else
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
#endif
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

#ifdef ATOMIC_FDE_FAST_PATH
  // Find the corresponding PC range
  struct object lookupob;
  lookupob.tbase = 0;
  lookupob.dbase = 0;
  lookupob.u.single = begin;
  lookupob.s.i = 0;
  lookupob.s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  lookupob.fde_end = NULL;
#endif
  uintptr_type range[2];
  get_pc_range (&lookupob, range);

  // And remove
  ob = btree_remove (&registered_frames, range[0]);
#else
  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  struct object **p;
  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
#endif

  gcc_assert (in_shutdown || ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}

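/* Note that the free above pairs with the malloc in __register_frame:
   only objects allocated by the __register_frame/__register_frame_table
   wrappers are freed here; the *_info variants leave ownership of the
   struct object with the caller.  */
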
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, const struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

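/* As a reminder of the layout being walked here: for a CIE whose
   augmentation string is "zPLR", the augmentation data consists of a
   uleb128 length, then one byte giving the personality pointer encoding
   followed by the encoded personality pointer ('P'), one byte for the
   LSDA encoding ('L'), and finally the one-byte FDE pointer encoding
   ('R') that this function is after.  */
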
static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
        return DW_EH_PE_omit;   /* We are not prepared to handle unexpected
                                   address sizes or segment selectors.  */
      p += 2;                   /* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);  /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);  /* Skip data alignment.  */
  if (cie->version == 1)        /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                        /* Skip 'z' */
  p = read_uleb128 (p, &utmp);  /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* aarch64 b-key pointer authentication.  */
      else if (*aug == 'B')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}

/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;
  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);

/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from
   functions in gnu_linkonce sections) whose values are higher than the
   values in the surrounding linear sequence (but not necessarily higher
   than the values at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

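/* The sort proceeds in three steps below: all FDEs are first appended to
   the LINEAR vector in input order (fde_insert); fde_split then peels the
   out-of-order entries off into the ERRATIC vector, which is heap sorted
   and merged back into LINEAR (end_fde_sort).  */
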
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

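/* For instance, given input values 10 11 50 12 13, the chain pass pops
   50 off the chain when 12 arrives (12 compares below 50), leaving its
   overlay slot NULL; the final pass then yields LINEAR = 10 11 12 13 and
   ERRATIC = 50.  */
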
static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const*) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */

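/* The merge runs backwards, from the highest index downward: V1 was
   allocated with room for the full FDE count, so the tail of its array
   is free, and every element lands in its final slot exactly once
   without any temporary buffer.  */
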
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}

/* Inspect the fde array beginning at this_fde.  This
   function can be used either in query mode (RANGE is
   not null, OB is const), or in update mode (RANGE is
   null, OB is modified).  In query mode the function computes
   the range of PC values and stores it in RANGE.  In
   update mode it updates encoding, mixed_encoding, and pc_begin
   for OB.  Return the number of fdes encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde,
                           uintptr_type *range)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          if (encoding == DW_EH_PE_omit)
            return -1;
          base = base_from_object (encoding, ob);
          if (!range)
            {
              if (ob->s.b.encoding == DW_EH_PE_omit)
                ob->s.b.encoding = encoding;
              else if (ob->s.b.encoding != encoding)
                ob->s.b.mixed_encoding = 1;
            }
        }

      const unsigned char *p;
      p = read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
        mask = -1;

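      /* For example, a 4-byte DW_EH_PE_udata4 encoding on a 64-bit target
         yields mask == 0xffffffff here: only the low 32 bits can hold an
         address, so only those bits are tested against zero below.  */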
      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if (range)
        {
          _Unwind_Ptr pc_range, pc_end;
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
          pc_end = pc_begin + pc_range;
          if ((!range[0]) && (!range[1]))
            {
              range[0] = pc_begin;
              range[1] = pc_end;
            }
          else
            {
              if (pc_begin < range[0])
                range[0] = pc_begin;
              if (pc_end > range[1])
                range[1] = pc_end;
            }
        }
      else
        {
          if ((void *) pc_begin < ob->pc_begin)
            ob->pc_begin = (void *) pc_begin;
        }
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          _Unwind_Ptr ptr;
          memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
          if (ptr == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            {
              size_t cur_count = classify_object_over_fdes (ob, *p, NULL);
              if (cur_count == (size_t) -1)
                goto unhandled_fdes;
              count += cur_count;
            }
        }
      else
        {
          count = classify_object_over_fdes (ob, ob->u.single, NULL);
          if (count == (size_t) -1)
            {
              static const fde terminator;
            unhandled_fdes:
              ob->s.i = 0;
              ob->s.b.encoding = DW_EH_PE_omit;
              ob->u.single = &terminator;
              return;
            }
        }

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

#ifdef ATOMIC_FDE_FAST_PATH
/* Get the PC range for lookup.  */
static void
get_pc_range (const struct object *ob, uintptr_type *range)
{
  // It is safe to cast to non-const object* here as
  // classify_object_over_fdes does not modify ob in query mode.
  struct object *ncob = (struct object *) (uintptr_type) ob;
  range[0] = range[1] = 0;
  if (ob->s.b.sorted)
    {
      classify_object_over_fdes (ncob, ob->u.sort->orig_data, range);
    }
  else if (ob->s.b.from_array)
    {
      fde **p = ob->u.array;
      for (; *p; ++p)
        classify_object_over_fdes (ncob, *p, range);
    }
  else
    {
      classify_object_over_fdes (ncob, ob->u.single, range);
    }
}
#endif

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
          pc_begin = pc_array[0];
          pc_range = pc_array[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

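/* All three assume that the FDE vector has been sorted by pc_begin and
   that the PC ranges do not overlap, so a containment test at the
   midpoint is enough to decide which half to keep.  */
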
static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* The fast path initializes objects eagerly to avoid locking.
     On the slow path we initialize them now.  */
#ifndef ATOMIC_FDE_FAST_PATH
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }
#endif

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

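/* The main entry point used by the unwinder proper: find the FDE that
   covers PC, if any, and on success also fill in BASES (text base, data
   base, and the start address of the function containing PC).  */
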
const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

#ifdef ATOMIC_FDE_FAST_PATH
  ob = btree_lookup (&registered_frames, (uintptr_type) pc);
  if (!ob)
    return NULL;

  f = search_object (ob, pc);
#else

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);
#endif

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}