PR target/65871
[official-gcc.git] / libgcc / unwind-dw2-fde.c
blob0bcf51661b89779e7ae4cc2023068a47d02e24f4
1 /* Subroutines needed for unwinding stack frames for exception handling. */
2 /* Copyright (C) 1997-2015 Free Software Foundation, Inc.
3 Contributed by Jason Merrill <jason@cygnus.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 Under Section 7 of GPL version 3, you are granted additional
18 permissions described in the GCC Runtime Library Exception, version
19 3.1, as published by the Free Software Foundation.
21 You should have received a copy of the GNU General Public License and
22 a copy of the GCC Runtime Library Exception along with this program;
23 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 <http://www.gnu.org/licenses/>. */
26 #ifndef _Unwind_Find_FDE
27 #include "tconfig.h"
28 #include "tsystem.h"
29 #include "coretypes.h"
30 #include "tm.h"
31 #include "libgcc_tm.h"
32 #include "dwarf2.h"
33 #include "unwind.h"
34 #define NO_BASE_OF_ENCODED_VALUE
35 #include "unwind-pe.h"
36 #include "unwind-dw2-fde.h"
37 #include "gthr.h"
38 #endif
/* Registered objects that have not been classified yet live on
   unseen_objects.  Once an object's pc_begin and count fields have been
   initialized (at minimum) it moves to seen_objects, which is kept
   sorted by decreasing pc_begin.  */

static struct object *unseen_objects;
static struct object *seen_objects;
47 #ifdef __GTHREAD_MUTEX_INIT
48 static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
49 #define init_object_mutex_once()
50 #else
51 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
52 static __gthread_mutex_t object_mutex;
54 static void
55 init_object_mutex (void)
57 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
60 static void
61 init_object_mutex_once (void)
63 static __gthread_once_t once = __GTHREAD_ONCE_INIT;
64 __gthread_once (&once, init_object_mutex);
66 #else
67 /* ??? Several targets include this file with stubbing parts of gthr.h
68 and expect no locking to be done. */
69 #define init_object_mutex_once()
70 static __gthread_mutex_t object_mutex;
71 #endif
72 #endif
74 /* Called from crtbegin.o to register the unwind info for an object. */
76 void
77 __register_frame_info_bases (const void *begin, struct object *ob,
78 void *tbase, void *dbase)
80 /* If .eh_frame is empty, don't register at all. */
81 if ((const uword *) begin == 0 || *(const uword *) begin == 0)
82 return;
84 ob->pc_begin = (void *)-1;
85 ob->tbase = tbase;
86 ob->dbase = dbase;
87 ob->u.single = begin;
88 ob->s.i = 0;
89 ob->s.b.encoding = DW_EH_PE_omit;
90 #ifdef DWARF2_OBJECT_END_PTR_EXTENSION
91 ob->fde_end = NULL;
92 #endif
94 init_object_mutex_once ();
95 __gthread_mutex_lock (&object_mutex);
97 ob->next = unseen_objects;
98 unseen_objects = ob;
100 __gthread_mutex_unlock (&object_mutex);
/* Convenience entry point: register with no text/data bases.  */

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
109 void
110 __register_frame (void *begin)
112 struct object *ob;
114 /* If .eh_frame is empty, don't register at all. */
115 if (*(uword *) begin == 0)
116 return;
118 ob = malloc (sizeof (struct object));
119 __register_frame_info (begin, ob);
122 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
123 for different translation units. Called from the file generated by
124 collect2. */
126 void
127 __register_frame_info_table_bases (void *begin, struct object *ob,
128 void *tbase, void *dbase)
130 ob->pc_begin = (void *)-1;
131 ob->tbase = tbase;
132 ob->dbase = dbase;
133 ob->u.array = begin;
134 ob->s.i = 0;
135 ob->s.b.from_array = 1;
136 ob->s.b.encoding = DW_EH_PE_omit;
138 init_object_mutex_once ();
139 __gthread_mutex_lock (&object_mutex);
141 ob->next = unseen_objects;
142 unseen_objects = ob;
144 __gthread_mutex_unlock (&object_mutex);
/* Table registration without text/data bases.  */

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
153 void
154 __register_frame_table (void *begin)
156 struct object *ob = malloc (sizeof (struct object));
157 __register_frame_info_table (begin, ob);
160 /* Called from crtbegin.o to deregister the unwind info for an object. */
161 /* ??? Glibc has for a while now exported __register_frame_info and
162 __deregister_frame_info. If we call __register_frame_info_bases
163 from crtbegin (wherein it is declared weak), and this object does
164 not get pulled from libgcc.a for other reasons, then the
165 invocation of __deregister_frame_info will be resolved from glibc.
166 Since the registration did not happen there, we'll die.
168 Therefore, declare a new deregistration entry point that does the
169 exact same thing, but will resolve to the same library as
170 implements __register_frame_info_bases. */
172 void *
173 __deregister_frame_info_bases (const void *begin)
175 struct object **p;
176 struct object *ob = 0;
178 /* If .eh_frame is empty, we haven't registered. */
179 if ((const uword *) begin == 0 || *(const uword *) begin == 0)
180 return ob;
182 init_object_mutex_once ();
183 __gthread_mutex_lock (&object_mutex);
185 for (p = &unseen_objects; *p ; p = &(*p)->next)
186 if ((*p)->u.single == begin)
188 ob = *p;
189 *p = ob->next;
190 goto out;
193 for (p = &seen_objects; *p ; p = &(*p)->next)
194 if ((*p)->s.b.sorted)
196 if ((*p)->u.sort->orig_data == begin)
198 ob = *p;
199 *p = ob->next;
200 free (ob->u.sort);
201 goto out;
204 else
206 if ((*p)->u.single == begin)
208 ob = *p;
209 *p = ob->next;
210 goto out;
214 out:
215 __gthread_mutex_unlock (&object_mutex);
216 gcc_assert (ob);
217 return (void *) ob;
/* Historical entry point; see the comment above
   __deregister_frame_info_bases.  */

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
226 void
227 __deregister_frame (void *begin)
229 /* If .eh_frame is empty, we haven't registered. */
230 if (*(uword *) begin != 0)
231 free (__deregister_frame_info (begin));
235 /* Like base_of_encoded_value, but take the base from a struct object
236 instead of an _Unwind_Context. */
238 static _Unwind_Ptr
239 base_from_object (unsigned char encoding, struct object *ob)
241 if (encoding == DW_EH_PE_omit)
242 return 0;
244 switch (encoding & 0x70)
246 case DW_EH_PE_absptr:
247 case DW_EH_PE_pcrel:
248 case DW_EH_PE_aligned:
249 return 0;
251 case DW_EH_PE_textrel:
252 return (_Unwind_Ptr) ob->tbase;
253 case DW_EH_PE_datarel:
254 return (_Unwind_Ptr) ob->dbase;
255 default:
256 gcc_unreachable ();
260 /* Return the FDE pointer encoding from the CIE. */
261 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
263 static int
264 get_cie_encoding (const struct dwarf_cie *cie)
266 const unsigned char *aug, *p;
267 _Unwind_Ptr dummy;
268 _uleb128_t utmp;
269 _sleb128_t stmp;
271 aug = cie->augmentation;
272 p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string. */
273 if (__builtin_expect (cie->version >= 4, 0))
275 if (p[0] != sizeof (void *) || p[1] != 0)
276 return DW_EH_PE_omit; /* We are not prepared to handle unexpected
277 address sizes or segment selectors. */
278 p += 2; /* Skip address size and segment size. */
281 if (aug[0] != 'z')
282 return DW_EH_PE_absptr;
284 p = read_uleb128 (p, &utmp); /* Skip code alignment. */
285 p = read_sleb128 (p, &stmp); /* Skip data alignment. */
286 if (cie->version == 1) /* Skip return address column. */
287 p++;
288 else
289 p = read_uleb128 (p, &utmp);
291 aug++; /* Skip 'z' */
292 p = read_uleb128 (p, &utmp); /* Skip augmentation length. */
293 while (1)
295 /* This is what we're looking for. */
296 if (*aug == 'R')
297 return *p;
298 /* Personality encoding and pointer. */
299 else if (*aug == 'P')
301 /* ??? Avoid dereferencing indirect pointers, since we're
302 faking the base address. Gotta keep DW_EH_PE_aligned
303 intact, however. */
304 p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
306 /* LSDA encoding. */
307 else if (*aug == 'L')
308 p++;
309 /* Otherwise end of string, or unknown augmentation. */
310 else
311 return DW_EH_PE_absptr;
312 aug++;
/* The pointer encoding of an FDE is that of its parent CIE.  */

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
323 /* Sorting an array of FDEs by address.
324 (Ideally we would have the linker sort the FDEs so we don't have to do
325 it at run time. But the linkers are not yet prepared for this.) */
327 /* Comparison routines. Three variants of increasing complexity. */
329 static int
330 fde_unencoded_compare (struct object *ob __attribute__((unused)),
331 const fde *x, const fde *y)
333 _Unwind_Ptr x_ptr, y_ptr;
334 memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
335 memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));
337 if (x_ptr > y_ptr)
338 return 1;
339 if (x_ptr < y_ptr)
340 return -1;
341 return 0;
344 static int
345 fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
347 _Unwind_Ptr base, x_ptr, y_ptr;
349 base = base_from_object (ob->s.b.encoding, ob);
350 read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
351 read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
353 if (x_ptr > y_ptr)
354 return 1;
355 if (x_ptr < y_ptr)
356 return -1;
357 return 0;
360 static int
361 fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
363 int x_encoding, y_encoding;
364 _Unwind_Ptr x_ptr, y_ptr;
366 x_encoding = get_fde_encoding (x);
367 read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
368 x->pc_begin, &x_ptr);
370 y_encoding = get_fde_encoding (y);
371 read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
372 y->pc_begin, &y_ptr);
374 if (x_ptr > y_ptr)
375 return 1;
376 if (x_ptr < y_ptr)
377 return -1;
378 return 0;
381 typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);
384 /* This is a special mix of insertion sort and heap sort, optimized for
385 the data sets that actually occur. They look like
386 101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
387 I.e. a linearly increasing sequence (coming from functions in the text
388 section), with additionally a few unordered elements (coming from functions
389 in gnu_linkonce sections) whose values are higher than the values in the
390 surrounding linear sequence (but not necessarily higher than the values
391 at the end of the linear sequence!).
392 The worst-case total run time is O(N) + O(n log (n)), where N is the
393 total number of FDEs and n is the number of erratic ones. */
395 struct fde_accumulator
397 struct fde_vector *linear;
398 struct fde_vector *erratic;
401 static inline int
402 start_fde_sort (struct fde_accumulator *accu, size_t count)
404 size_t size;
405 if (! count)
406 return 0;
408 size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
409 if ((accu->linear = malloc (size)))
411 accu->linear->count = 0;
412 if ((accu->erratic = malloc (size)))
413 accu->erratic->count = 0;
414 return 1;
416 else
417 return 0;
420 static inline void
421 fde_insert (struct fde_accumulator *accu, const fde *this_fde)
423 if (accu->linear)
424 accu->linear->array[accu->linear->count++] = this_fde;
427 /* Split LINEAR into a linear sequence with low values and an erratic
428 sequence with high values, put the linear one (of longest possible
429 length) into LINEAR and the erratic one into ERRATIC. This is O(N).
431 Because the longest linear sequence we are trying to locate within the
432 incoming LINEAR array can be interspersed with (high valued) erratic
433 entries. We construct a chain indicating the sequenced entries.
434 To avoid having to allocate this chain, we overlay it onto the space of
435 the ERRATIC array during construction. A final pass iterates over the
436 chain to determine what should be placed in the ERRATIC array, and
437 what is the linear sequence. This overlay is safe from aliasing. */
439 static inline void
440 fde_split (struct object *ob, fde_compare_t fde_compare,
441 struct fde_vector *linear, struct fde_vector *erratic)
443 static const fde *marker;
444 size_t count = linear->count;
445 const fde *const *chain_end = &marker;
446 size_t i, j, k;
448 /* This should optimize out, but it is wise to make sure this assumption
449 is correct. Should these have different sizes, we cannot cast between
450 them and the overlaying onto ERRATIC will not work. */
451 gcc_assert (sizeof (const fde *) == sizeof (const fde **));
453 for (i = 0; i < count; i++)
455 const fde *const *probe;
457 for (probe = chain_end;
458 probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
459 probe = chain_end)
461 chain_end = (const fde *const*) erratic->array[probe - linear->array];
462 erratic->array[probe - linear->array] = NULL;
464 erratic->array[i] = (const fde *) chain_end;
465 chain_end = &linear->array[i];
468 /* Each entry in LINEAR which is part of the linear sequence we have
469 discovered will correspond to a non-NULL entry in the chain we built in
470 the ERRATIC array. */
471 for (i = j = k = 0; i < count; i++)
472 if (erratic->array[i])
473 linear->array[j++] = linear->array[i];
474 else
475 erratic->array[k++] = linear->array[i];
476 linear->count = j;
477 erratic->count = k;
480 #define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)
482 /* Convert a semi-heap to a heap. A semi-heap is a heap except possibly
483 for the first (root) node; push it down to its rightful place. */
485 static void
486 frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
487 int lo, int hi)
489 int i, j;
491 for (i = lo, j = 2*i+1;
492 j < hi;
493 j = 2*i+1)
495 if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
496 ++j;
498 if (fde_compare (ob, a[i], a[j]) < 0)
500 SWAP (a[i], a[j]);
501 i = j;
503 else
504 break;
508 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
509 use a name that does not conflict. */
511 static void
512 frame_heapsort (struct object *ob, fde_compare_t fde_compare,
513 struct fde_vector *erratic)
515 /* For a description of this algorithm, see:
516 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
517 p. 60-61. */
518 const fde ** a = erratic->array;
519 /* A portion of the array is called a "heap" if for all i>=0:
520 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
521 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
522 size_t n = erratic->count;
523 int m;
525 /* Expand our heap incrementally from the end of the array, heapifying
526 each resulting semi-heap as we go. After each step, a[m] is the top
527 of a heap. */
528 for (m = n/2-1; m >= 0; --m)
529 frame_downheap (ob, fde_compare, a, m, n);
531 /* Shrink our heap incrementally from the end of the array, first
532 swapping out the largest element a[0] and then re-heapifying the
533 resulting semi-heap. After each step, a[0..m) is a heap. */
534 for (m = n-1; m >= 1; --m)
536 SWAP (a[0], a[m]);
537 frame_downheap (ob, fde_compare, a, 0, m);
539 #undef SWAP
542 /* Merge V1 and V2, both sorted, and put the result into V1. */
543 static inline void
544 fde_merge (struct object *ob, fde_compare_t fde_compare,
545 struct fde_vector *v1, struct fde_vector *v2)
547 size_t i1, i2;
548 const fde * fde2;
550 i2 = v2->count;
551 if (i2 > 0)
553 i1 = v1->count;
556 i2--;
557 fde2 = v2->array[i2];
558 while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
560 v1->array[i1+i2] = v1->array[i1-1];
561 i1--;
563 v1->array[i1+i2] = fde2;
565 while (i2 > 0);
566 v1->count += v2->count;
570 static inline void
571 end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
573 fde_compare_t fde_compare;
575 gcc_assert (!accu->linear || accu->linear->count == count);
577 if (ob->s.b.mixed_encoding)
578 fde_compare = fde_mixed_encoding_compare;
579 else if (ob->s.b.encoding == DW_EH_PE_absptr)
580 fde_compare = fde_unencoded_compare;
581 else
582 fde_compare = fde_single_encoding_compare;
584 if (accu->erratic)
586 fde_split (ob, fde_compare, accu->linear, accu->erratic);
587 gcc_assert (accu->linear->count + accu->erratic->count == count);
588 frame_heapsort (ob, fde_compare, accu->erratic);
589 fde_merge (ob, fde_compare, accu->linear, accu->erratic);
590 free (accu->erratic);
592 else
594 /* We've not managed to malloc an erratic array,
595 so heap sort in the linear one. */
596 frame_heapsort (ob, fde_compare, accu->linear);
601 /* Update encoding, mixed_encoding, and pc_begin for OB for the
602 fde array beginning at THIS_FDE. Return the number of fdes
603 encountered along the way. */
605 static size_t
606 classify_object_over_fdes (struct object *ob, const fde *this_fde)
608 const struct dwarf_cie *last_cie = 0;
609 size_t count = 0;
610 int encoding = DW_EH_PE_absptr;
611 _Unwind_Ptr base = 0;
613 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
615 const struct dwarf_cie *this_cie;
616 _Unwind_Ptr mask, pc_begin;
618 /* Skip CIEs. */
619 if (this_fde->CIE_delta == 0)
620 continue;
622 /* Determine the encoding for this FDE. Note mixed encoded
623 objects for later. */
624 this_cie = get_cie (this_fde);
625 if (this_cie != last_cie)
627 last_cie = this_cie;
628 encoding = get_cie_encoding (this_cie);
629 if (encoding == DW_EH_PE_omit)
630 return -1;
631 base = base_from_object (encoding, ob);
632 if (ob->s.b.encoding == DW_EH_PE_omit)
633 ob->s.b.encoding = encoding;
634 else if (ob->s.b.encoding != encoding)
635 ob->s.b.mixed_encoding = 1;
638 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
639 &pc_begin);
641 /* Take care to ignore link-once functions that were removed.
642 In these cases, the function address will be NULL, but if
643 the encoding is smaller than a pointer a true NULL may not
644 be representable. Assume 0 in the representable bits is NULL. */
645 mask = size_of_encoded_value (encoding);
646 if (mask < sizeof (void *))
647 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
648 else
649 mask = -1;
651 if ((pc_begin & mask) == 0)
652 continue;
654 count += 1;
655 if ((void *) pc_begin < ob->pc_begin)
656 ob->pc_begin = (void *) pc_begin;
659 return count;
662 static void
663 add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
665 const struct dwarf_cie *last_cie = 0;
666 int encoding = ob->s.b.encoding;
667 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
669 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
671 const struct dwarf_cie *this_cie;
673 /* Skip CIEs. */
674 if (this_fde->CIE_delta == 0)
675 continue;
677 if (ob->s.b.mixed_encoding)
679 /* Determine the encoding for this FDE. Note mixed encoded
680 objects for later. */
681 this_cie = get_cie (this_fde);
682 if (this_cie != last_cie)
684 last_cie = this_cie;
685 encoding = get_cie_encoding (this_cie);
686 base = base_from_object (encoding, ob);
690 if (encoding == DW_EH_PE_absptr)
692 _Unwind_Ptr ptr;
693 memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
694 if (ptr == 0)
695 continue;
697 else
699 _Unwind_Ptr pc_begin, mask;
701 read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
702 &pc_begin);
704 /* Take care to ignore link-once functions that were removed.
705 In these cases, the function address will be NULL, but if
706 the encoding is smaller than a pointer a true NULL may not
707 be representable. Assume 0 in the representable bits is NULL. */
708 mask = size_of_encoded_value (encoding);
709 if (mask < sizeof (void *))
710 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
711 else
712 mask = -1;
714 if ((pc_begin & mask) == 0)
715 continue;
718 fde_insert (accu, this_fde);
722 /* Set up a sorted array of pointers to FDEs for a loaded object. We
723 count up the entries before allocating the array because it's likely to
724 be faster. We can be called multiple times, should we have failed to
725 allocate a sorted fde array on a previous occasion. */
727 static inline void
728 init_object (struct object* ob)
730 struct fde_accumulator accu;
731 size_t count;
733 count = ob->s.b.count;
734 if (count == 0)
736 if (ob->s.b.from_array)
738 fde **p = ob->u.array;
739 for (count = 0; *p; ++p)
741 size_t cur_count = classify_object_over_fdes (ob, *p);
742 if (cur_count == (size_t) -1)
743 goto unhandled_fdes;
744 count += cur_count;
747 else
749 count = classify_object_over_fdes (ob, ob->u.single);
750 if (count == (size_t) -1)
752 static const fde terminator;
753 unhandled_fdes:
754 ob->s.i = 0;
755 ob->s.b.encoding = DW_EH_PE_omit;
756 ob->u.single = &terminator;
757 return;
761 /* The count field we have in the main struct object is somewhat
762 limited, but should suffice for virtually all cases. If the
763 counted value doesn't fit, re-write a zero. The worst that
764 happens is that we re-count next time -- admittedly non-trivial
765 in that this implies some 2M fdes, but at least we function. */
766 ob->s.b.count = count;
767 if (ob->s.b.count != count)
768 ob->s.b.count = 0;
771 if (!start_fde_sort (&accu, count))
772 return;
774 if (ob->s.b.from_array)
776 fde **p;
777 for (p = ob->u.array; *p; ++p)
778 add_fdes (ob, &accu, *p);
780 else
781 add_fdes (ob, &accu, ob->u.single);
783 end_fde_sort (ob, &accu, count);
785 /* Save the original fde pointer, since this is the key by which the
786 DSO will deregister the object. */
787 accu.linear->orig_data = ob->u.single;
788 ob->u.sort = accu.linear;
790 ob->s.b.sorted = 1;
793 /* A linear search through a set of FDEs for the given PC. This is
794 used when there was insufficient memory to allocate and sort an
795 array. */
797 static const fde *
798 linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
800 const struct dwarf_cie *last_cie = 0;
801 int encoding = ob->s.b.encoding;
802 _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);
804 for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
806 const struct dwarf_cie *this_cie;
807 _Unwind_Ptr pc_begin, pc_range;
809 /* Skip CIEs. */
810 if (this_fde->CIE_delta == 0)
811 continue;
813 if (ob->s.b.mixed_encoding)
815 /* Determine the encoding for this FDE. Note mixed encoded
816 objects for later. */
817 this_cie = get_cie (this_fde);
818 if (this_cie != last_cie)
820 last_cie = this_cie;
821 encoding = get_cie_encoding (this_cie);
822 base = base_from_object (encoding, ob);
826 if (encoding == DW_EH_PE_absptr)
828 const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
829 pc_begin = pc_array[0];
830 pc_range = pc_array[1];
831 if (pc_begin == 0)
832 continue;
834 else
836 _Unwind_Ptr mask;
837 const unsigned char *p;
839 p = read_encoded_value_with_base (encoding, base,
840 this_fde->pc_begin, &pc_begin);
841 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
843 /* Take care to ignore link-once functions that were removed.
844 In these cases, the function address will be NULL, but if
845 the encoding is smaller than a pointer a true NULL may not
846 be representable. Assume 0 in the representable bits is NULL. */
847 mask = size_of_encoded_value (encoding);
848 if (mask < sizeof (void *))
849 mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
850 else
851 mask = -1;
853 if ((pc_begin & mask) == 0)
854 continue;
857 if ((_Unwind_Ptr) pc - pc_begin < pc_range)
858 return this_fde;
861 return NULL;
864 /* Binary search for an FDE containing the given PC. Here are three
865 implementations of increasing complexity. */
867 static inline const fde *
868 binary_search_unencoded_fdes (struct object *ob, void *pc)
870 struct fde_vector *vec = ob->u.sort;
871 size_t lo, hi;
873 for (lo = 0, hi = vec->count; lo < hi; )
875 size_t i = (lo + hi) / 2;
876 const fde *const f = vec->array[i];
877 void *pc_begin;
878 uaddr pc_range;
879 memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
880 memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));
882 if (pc < pc_begin)
883 hi = i;
884 else if (pc >= pc_begin + pc_range)
885 lo = i + 1;
886 else
887 return f;
890 return NULL;
893 static inline const fde *
894 binary_search_single_encoding_fdes (struct object *ob, void *pc)
896 struct fde_vector *vec = ob->u.sort;
897 int encoding = ob->s.b.encoding;
898 _Unwind_Ptr base = base_from_object (encoding, ob);
899 size_t lo, hi;
901 for (lo = 0, hi = vec->count; lo < hi; )
903 size_t i = (lo + hi) / 2;
904 const fde *f = vec->array[i];
905 _Unwind_Ptr pc_begin, pc_range;
906 const unsigned char *p;
908 p = read_encoded_value_with_base (encoding, base, f->pc_begin,
909 &pc_begin);
910 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
912 if ((_Unwind_Ptr) pc < pc_begin)
913 hi = i;
914 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
915 lo = i + 1;
916 else
917 return f;
920 return NULL;
923 static inline const fde *
924 binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
926 struct fde_vector *vec = ob->u.sort;
927 size_t lo, hi;
929 for (lo = 0, hi = vec->count; lo < hi; )
931 size_t i = (lo + hi) / 2;
932 const fde *f = vec->array[i];
933 _Unwind_Ptr pc_begin, pc_range;
934 const unsigned char *p;
935 int encoding;
937 encoding = get_fde_encoding (f);
938 p = read_encoded_value_with_base (encoding,
939 base_from_object (encoding, ob),
940 f->pc_begin, &pc_begin);
941 read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);
943 if ((_Unwind_Ptr) pc < pc_begin)
944 hi = i;
945 else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
946 lo = i + 1;
947 else
948 return f;
951 return NULL;
954 static const fde *
955 search_object (struct object* ob, void *pc)
957 /* If the data hasn't been sorted, try to do this now. We may have
958 more memory available than last time we tried. */
959 if (! ob->s.b.sorted)
961 init_object (ob);
963 /* Despite the above comment, the normal reason to get here is
964 that we've not processed this object before. A quick range
965 check is in order. */
966 if (pc < ob->pc_begin)
967 return NULL;
970 if (ob->s.b.sorted)
972 if (ob->s.b.mixed_encoding)
973 return binary_search_mixed_encoding_fdes (ob, pc);
974 else if (ob->s.b.encoding == DW_EH_PE_absptr)
975 return binary_search_unencoded_fdes (ob, pc);
976 else
977 return binary_search_single_encoding_fdes (ob, pc);
979 else
981 /* Long slow laborious linear search, cos we've no memory. */
982 if (ob->s.b.from_array)
984 fde **p;
985 for (p = ob->u.array; *p ; p++)
987 const fde *f = linear_search_fdes (ob, *p, pc);
988 if (f)
989 return f;
991 return NULL;
993 else
994 return linear_search_fdes (ob, ob->u.single, pc);
998 const fde *
999 _Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
1001 struct object *ob;
1002 const fde *f = NULL;
1004 init_object_mutex_once ();
1005 __gthread_mutex_lock (&object_mutex);
1007 /* Linear search through the classified objects, to find the one
1008 containing the pc. Note that pc_begin is sorted descending, and
1009 we expect objects to be non-overlapping. */
1010 for (ob = seen_objects; ob; ob = ob->next)
1011 if (pc >= ob->pc_begin)
1013 f = search_object (ob, pc);
1014 if (f)
1015 goto fini;
1016 break;
1019 /* Classify and search the objects we've not yet processed. */
1020 while ((ob = unseen_objects))
1022 struct object **p;
1024 unseen_objects = ob->next;
1025 f = search_object (ob, pc);
1027 /* Insert the object into the classified list. */
1028 for (p = &seen_objects; *p ; p = &(*p)->next)
1029 if ((*p)->pc_begin < ob->pc_begin)
1030 break;
1031 ob->next = *p;
1032 *p = ob;
1034 if (f)
1035 goto fini;
1038 fini:
1039 __gthread_mutex_unlock (&object_mutex);
1041 if (f)
1043 int encoding;
1044 _Unwind_Ptr func;
1046 bases->tbase = ob->tbase;
1047 bases->dbase = ob->dbase;
1049 encoding = ob->s.b.encoding;
1050 if (ob->s.b.mixed_encoding)
1051 encoding = get_fde_encoding (f);
1052 read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
1053 f->pc_begin, &func);
1054 bases->func = (void *) func;
1057 return f;