/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
   2009, 2010, 2011  Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "libgcc_tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#define init_object_mutex_once()
#else
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static __gthread_mutex_t object_mutex;

static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* ??? Several targets include this file with stubbing parts of gthr.h
   and expect no locking to be done.  */
#define init_object_mutex_once()
static __gthread_mutex_t object_mutex;
#endif
#endif
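
/* All manipulation of the seen_objects and unseen_objects lists below is
   performed while holding object_mutex (a no-op when the gthr.h stubs
   provide no locking).  */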

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
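
/* Note that __register_frame malloc's its struct object; the matching free
   happens in __deregister_frame below, which frees the pointer handed back
   by __deregister_frame_info.  */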

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}

/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
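
/* DW_EH_PE_pcrel values are resolved by read_encoded_value_with_base
   against the address of the encoded datum itself, and DW_EH_PE_aligned
   values are stored as absolute pointers, so neither needs a base taken
   from the object.  */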

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  p = aug + strlen ((const char *) aug) + 1; /* Skip the augmentation string.  */
  if (__builtin_expect (cie->version >= 4, 0))
    {
      if (p[0] != sizeof (void *) || p[1] != 0)
        return DW_EH_PE_omit;   /* We are not prepared to handle unexpected
                                   address sizes or segment selectors.  */
      p += 2;                   /* Skip address size and segment size.  */
    }

  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = read_uleb128 (p, &utmp);  /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);  /* Skip data alignment.  */
  if (cie->version == 1)        /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                        /* Skip 'z' */
  p = read_uleb128 (p, &utmp);  /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}

/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;

  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);

/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
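
/* LINEAR accumulates the FDE pointers in the order they are encountered;
   ERRATIC is scratch space of the same size, used to peel off the
   out-of-order entries, heapsort them, and merge them back into LINEAR.  */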

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const *) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
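
/* For example, given the sample sequence from the comment above,
     101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130
   an entry stays in LINEAR exactly when no later entry compares below it,
   so the split leaves
     LINEAR:  101 102 103 105 108 110 111 115 119 125 126 129 130
     ERRATIC: 127 128 190 160
   and ERRATIC is subsequently heapsorted and merged back in.  */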

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}
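
/* The merge can work in place because V1 was allocated with room for all
   COUNT entries; elements are placed from the back, at index i1+i2, which
   never reaches an element of V1 that has not yet been merged.  */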

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
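
/* The comparator chosen above mirrors the choice of binary search routine
   in search_object below: mixed encodings, plain absolute pointers, or a
   single non-trivial encoding shared by every FDE.  */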

/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          if (encoding == DW_EH_PE_omit)
            return -1;
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
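
/* A return value of (size_t) -1 signals an FDE encoding we cannot handle
   (see the CIE version >= 4 check in get_cie_encoding); init_object then
   abandons the object by pointing it at an empty terminator.  */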

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          _Unwind_Ptr ptr;
          memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
          if (ptr == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}
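
/* add_fdes must skip entries by exactly the same criteria as
   classify_object_over_fdes above, so that the number of FDEs inserted
   matches the COUNT that sized the accumulator (end_fde_sort asserts
   this).  */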

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            {
              size_t cur_count = classify_object_over_fdes (ob, *p);
              if (cur_count == (size_t) -1)
                goto unhandled_fdes;
              count += cur_count;
            }
        }
      else
        {
          count = classify_object_over_fdes (ob, ob->u.single);
          if (count == (size_t) -1)
            {
              static const fde terminator;
            unhandled_fdes:
              ob->s.i = 0;
              ob->s.b.encoding = DW_EH_PE_omit;
              ob->u.single = &terminator;
              return;
            }
        }

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
          pc_begin = pc_array[0];
          pc_range = pc_array[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
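
/* With DW_EH_PE_absptr the FDE stores pc_begin as a raw pointer followed
   immediately by a uaddr range, so both can be copied out directly; memcpy
   avoids assuming the entry is aligned for direct pointer-sized loads.  */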

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}