gcc/unwind-dw2-fde.c
/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
   2009  Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif
/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
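/* Editorial note: besides crtbegin.o, a JIT compiler that emits its own
   .eh_frame data can use these entry points directly.  A hypothetical
   usage sketch, assuming FRAME points at a writable copy of .eh_frame
   data starting with a CIE and terminated by a zero length word:

       __register_frame (frame);
       ... run jitted code that may throw ...
       __deregister_frame (frame);

   __register_frame and __register_frame_table allocate the struct
   object with malloc themselves, so the caller only keeps BEGIN as the
   key for later deregistration.  */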
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
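/* Editorial note: DW_EH_PE_pcrel and DW_EH_PE_aligned also yield a zero
   base here, presumably because read_encoded_value_with_base in
   unwind-pe.h resolves pc-relative values against the address of the
   encoded pointer itself and aligned values are stored as absolute
   pointers, so no per-object base is required for them.  */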
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);         /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);         /* Skip data alignment.  */
  if (cie->version == 1)               /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                               /* Skip 'z' */
  p = read_uleb128 (p, &utmp);         /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}
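/* Example (for orientation, not an exhaustive description of the CIE
   format): for the common augmentation string "zPLR" the augmentation
   data holds its own length (uleb128), then a personality encoding
   byte plus the encoded personality pointer for 'P', an LSDA encoding
   byte for 'L', and finally the FDE pointer encoding byte for 'R',
   which is what the loop above returns.  */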
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}


/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */
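/* end_fde_sort and search_object below pick among these: the mixed
   variant when ob->s.b.mixed_encoding is set, the unencoded variant
   when the single encoding is DW_EH_PE_absptr, and the single-encoding
   variant otherwise.  */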
static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  const _Unwind_Ptr x_ptr = *(const _Unwind_Ptr *) x->pc_begin;
  const _Unwind_Ptr y_ptr = *(const _Unwind_Ptr *) y->pc_begin;

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
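/* Roughly, the sort proceeds in three phases driven by init_object
   below: start_fde_sort allocates the LINEAR and ERRATIC vectors,
   fde_insert appends every live FDE to LINEAR, and end_fde_sort then
   splits out the erratic entries, heap-sorts them, and merges the two
   sorted vectors back into one.  */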
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const*) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
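/* For instance, on the sample sequence quoted above
   (101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130),
   this split should leave
     LINEAR:  101 102 103 105 108 110 111 115 119 125 126 129 130
     ERRATIC: 127 128 190 160
   with ERRATIC then heap-sorted and merged back in by end_fde_sort.  */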
#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
}
#undef SWAP
/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}
static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      else
        mask = -1;
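      /* Worked example, for illustration: with a 4-byte encoding such
         as DW_EH_PE_udata4 on a 64-bit host, size_of_encoded_value
         returns 4, so mask becomes (1L << 32) - 1 = 0xffffffff; an FDE
         whose pc_begin is zero in those low 32 bits is taken to be a
         discarded link-once function and is skipped below.  */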
      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          if (*(const _Unwind_Ptr *) this_fde->pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
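      /* (The limit follows from the width of the count bitfield in
         struct object in unwind-dw2-fde.h -- presumably about 21 bits,
         which matches the "2M fdes" figure above.)  */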
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          pc_begin = ((const _Unwind_Ptr *) this_fde->pc_begin)[0];
          pc_range = ((const _Unwind_Ptr *) this_fde->pc_begin)[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      const void *pc_begin = ((const void *const*) f->pc_begin)[0];
      const uaddr pc_range = ((const uaddr *) f->pc_begin)[1];

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}
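/* _Unwind_Find_FDE below is the lookup entry point used by the DWARF-2
   unwinder in unwind-dw2.c: given a PC it returns the matching FDE and
   fills in BASES so the caller can later resolve text-, data- and
   function-relative pointer encodings while decoding that FDE's CFI.  */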
const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}