libgcc/unwind-dw2-fde.c
/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
   2009, 2010  Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif
/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}
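/* Illustration (not part of the original source): a minimal sketch of how a
   client that produces its own .eh_frame data -- a JIT, say -- might drive
   the registration entry points above.  The names jit_ob and jit_eh_frame
   are hypothetical; the requirements are only that the same BEGIN pointer is
   used for registration and deregistration and that the storage for the
   struct object outlives the registration.  */
#if 0
static struct object jit_ob;

static void
jit_install_unwind_info (void *jit_eh_frame)
{
  /* Hand a zero-terminated .eh_frame blob to the unwinder.  */
  __register_frame_info (jit_eh_frame, &jit_ob);
}

static void
jit_remove_unwind_info (void *jit_eh_frame)
{
  /* Must pass the exact pointer that was registered.  */
  __deregister_frame_info (jit_eh_frame);
}
#endif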
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
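/* Illustration (not part of the original source): the high nibble of an
   encoding byte selects the base and the low nibble the value format, which
   is why only (encoding & 0x70) is switched on above.  For example,
   DW_EH_PE_datarel | DW_EH_PE_sdata4 is 0x30 | 0x0b == 0x3b; masked with
   0x70 it is DW_EH_PE_datarel, so the base is ob->dbase, and the value
   itself is later decoded as a signed 4-byte datum.  */
#if 0
unsigned char enc = DW_EH_PE_datarel | DW_EH_PE_sdata4;  /* 0x3b */
_Unwind_Ptr base = base_from_object (enc, ob);            /* (_Unwind_Ptr) ob->dbase */
#endif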
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
        return DW_EH_PE_omit;           /* We are not prepared to handle unexpected
                                           address sizes or segment selectors.  */
      p += 2;                           /* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);          /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);          /* Skip data alignment.  */
  if (cie->version == 1)                /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                                /* Skip 'z' */
  p = read_uleb128 (p, &utmp);          /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;

  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}
static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const*) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
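/* Illustration (not part of the original source): applying fde_split to the
   sample address sequence from the sorting comment above,

     101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130

   the chain walk drops a previously chained entry whenever a later, smaller
   address arrives, leaving (under that hand trace):

     LINEAR : 101 102 103 105 108 110 111 115 119 125 126 129 130
     ERRATIC: 127 128 190 160

   ERRATIC is then heap-sorted and merged back into LINEAR.  */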
#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}
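/* Illustration (not part of the original source): a hand trace of
   frame_heapsort on a three-element erratic array {101, 190, 127}.
   Heapify: downheap at the root swaps 101 and 190, giving {190, 101, 127}.
   Shrink: swap a[0] and a[2] -> {127, 101, 190}, re-heapify the two-element
   prefix (no swap needed); then swap a[0] and a[1] -> {101, 127, 190},
   which is sorted.  */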
/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}
static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          if (encoding == DW_EH_PE_omit)
            return -1;
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
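/* Illustration (not part of the original source): the mask computation
   above, for a 4-byte encoding such as DW_EH_PE_udata4 on an LP64 target.
   size_of_encoded_value returns 4, which is smaller than sizeof (void *),
   so mask becomes ((_Unwind_Ptr) 1 << (4 << 3)) - 1 == 0xffffffff.  An FDE
   whose encoded pc_begin is zero in those low 32 bits is then treated as a
   removed link-once function and skipped, even though a full 64-bit NULL
   could not have been represented in the encoding.  */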
static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          _Unwind_Ptr ptr;
          memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
          if (ptr == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            {
              size_t cur_count = classify_object_over_fdes (ob, *p);
              if (cur_count == (size_t) -1)
                goto unhandled_fdes;
              count += cur_count;
            }
        }
      else
        {
          count = classify_object_over_fdes (ob, ob->u.single);
          if (count == (size_t) -1)
            {
              static const fde terminator;
            unhandled_fdes:
              ob->s.i = 0;
              ob->s.b.encoding = DW_EH_PE_omit;
              ob->u.single = &terminator;
              return;
            }
        }

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
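/* Illustration (not part of the original source, values hypothetical):
   ob->s.b.count is a narrow bitfield (21 bits in unwind-dw2-fde.h at the
   time of writing), so the store-and-compare above detects overflow by
   truncation.  E.g. with count == 3000000, the assignment stores
   3000000 % (1 << 21) == 902848; the read-back then differs from count,
   so zero is stored instead and the FDEs are simply re-counted on the
   next call to init_object.  */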
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
          pc_begin = pc_array[0];
          pc_range = pc_array[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}
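/* Illustration (not part of the original source): in the encoded branch
   above, pc_begin is decoded with the full encoding and its base, while
   pc_range is a length rather than an address, so it is decoded with
   (encoding & 0x0F) and a zero base.  For example, with
   DW_EH_PE_pcrel | DW_EH_PE_sdata4 (0x1b) the range is read as a plain
   sdata4 value (0x0b).  The same convention is used by the binary search
   routines below.  */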
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}
const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}
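/* Illustration (not part of the original source): a sketch of how the rest
   of the unwinder might consume _Unwind_Find_FDE.  The variable names are
   hypothetical; the real caller lives in unwind-dw2.c.  Looking up ra - 1
   keeps a call that ends a function inside that function's FDE.  */
#if 0
_Unwind_Ptr ra = /* return address from the frame being unwound */ 0;
struct dwarf_eh_bases bases;
const fde *f = _Unwind_Find_FDE ((void *) (ra - 1), &bases);
if (f != NULL)
  {
    /* bases.tbase, bases.dbase and bases.func now describe the object
       and function containing ra - 1, and can be used as bases when
       decoding pointers from this FDE and its CIE.  */
  }
#endif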