/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
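/* Objects enter the unseen_objects list via the __register_frame* entry
   points below and migrate to seen_objects the first time
   _Unwind_Find_FDE classifies them (see search_object and
   _Unwind_Find_FDE at the end of this file).  */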

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
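/* __register_frame is the variant typically used when unwind info is
   produced at run time (for example by a JIT that emits its own
   zero-terminated .eh_frame data); unlike the crtbegin.o path it
   allocates the struct object itself, so the caller only supplies the
   .eh_frame pointer.  (The JIT scenario is illustrative, not something
   this file depends on.)  */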

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}
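/* Registration and deregistration are keyed by the same BEGIN pointer,
   and __deregister_frame frees the object that __register_frame malloc'd
   (via the pointer returned by __deregister_frame_info).  A minimal
   illustrative pairing, with eh_frame_start being a hypothetical pointer
   to a zero-terminated .eh_frame blob:

     __register_frame (eh_frame_start);
     ...use the unwinder...
     __deregister_frame (eh_frame_start);
*/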

/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}
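/* The 0x70 mask isolates the base-selection bits of a DW_EH_PE_* encoding
   while ignoring the value-format bits in the low nibble.  For example, a
   value encoded as (DW_EH_PE_datarel | DW_EH_PE_sdata4) == 0x3b masks to
   0x30 == DW_EH_PE_datarel, so it is interpreted relative to ob->dbase.  */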

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen ((const char *) aug) + 1; /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);          /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);          /* Skip data alignment.  */
  if (cie->version == 1)                /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                                /* Skip 'z' */
  p = read_uleb128 (p, &utmp);          /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}
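/* Worked example: for a CIE with augmentation string "zPLR" (the common
   GCC output when a personality routine is present), the augmentation
   data consists of
       a uleb128 data length,
       'P': one byte giving the personality pointer's encoding, followed
            by the encoded personality pointer itself,
       'L': one byte giving the LSDA encoding,
       'R': one byte giving the encoding of the FDE pc_begin/pc_range.
   The loop above advances AUG and P in step, skipping the 'P' and 'L'
   data, and returns the byte found at 'R'.  */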

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}

/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  const _Unwind_Ptr x_ptr = *(const _Unwind_Ptr *) x->pc_begin;
  const _Unwind_Ptr y_ptr = *(const _Unwind_Ptr *) y->pc_begin;

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);

/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from
   functions in gnu_linkonce sections) whose values are higher than the
   values in the surrounding linear sequence (but not necessarily higher
   than the values at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
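/* On the sample sequence above, fde_split keeps the monotone run
   101 102 103 105 108 110 111 115 119 125 126 129 130 in LINEAR and
   diverts the four out-of-place entries 127 128 190 160 to ERRATIC;
   frame_heapsort then sorts those four (the O(n log n) part) and
   fde_merge folds them back into the linear run (the O(N) part).  */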

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values; put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const *) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}
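/* Tiny worked example of the downheap step: for a = {105, 190, 130} with
   lo == 0 and hi == 3, the children of the root are a[1] == 190 and
   a[2] == 130; 190 is the larger child, and since 105 < 190 the two are
   swapped, giving the valid heap {190, 105, 130}.  */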

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */

static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}
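/* The merge walks both arrays from the back so it can run in place inside
   V1's storage.  For example, with V1 = {101, 103, 105} and V2 = {102, 104},
   104 is placed after 105 is shifted up, then 102 after 103 is shifted up,
   yielding {101, 102, 103, 104, 105} with v1->count == 5.  */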

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}

/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
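      /* For instance, with a 4-byte encoding such as DW_EH_PE_udata4 on a
         typical 64-bit host, size_of_encoded_value returns 4, so the mask
         computed below is (1L << 32) - 1 == 0xffffffff and only the low
         32 bits of pc_begin are tested against zero.  */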
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          if (*(const _Unwind_Ptr *) this_fde->pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
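      /* (The store-and-compare just below detects that truncation: if
         COUNT does not fit in the s.b.count bitfield, reading it back
         yields a different value and we fall back to zero so that the
         next call re-counts.)  */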
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          pc_begin = ((const _Unwind_Ptr *) this_fde->pc_begin)[0];
          pc_range = ((const _Unwind_Ptr *) this_fde->pc_begin)[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      const void *pc_begin = ((const void *const *) f->pc_begin)[0];
      const uaddr pc_range = ((const uaddr *) f->pc_begin)[1];

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
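/* All three searches maintain the same half-open invariant: if any FDE
   covers PC, its index lies in [lo, hi).  For example, with three FDEs
   covering [0x1000,0x1040), [0x2000,0x2080) and [0x3000,0x3010) and
   pc == 0x2050, the first probe at i == 1 already satisfies
   pc_begin <= pc < pc_begin + pc_range and that FDE is returned.  */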

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}
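/* _Unwind_Find_FDE is the lookup half of this registry: the DWARF unwinder
   proper (unwind-dw2.c) calls it for each frame's instruction pointer to
   obtain the FDE plus the text/data/function base addresses needed to
   decode encoded values.  (That caller lives outside this file; this note
   is a summary, not part of its interface contract.)  */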