/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
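/* Both lists are protected by object_mutex below.  An object starts out
   on unseen_objects when one of the __register_frame* entry points is
   called, and is moved to seen_objects the first time _Unwind_Find_FDE
   needs to classify it.  */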
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((uword *) begin == 0 || *(uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}

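/* For illustration only: a program that produces its own DWARF unwind data
   at run time -- a JIT, for instance -- would typically register and later
   remove it roughly like this (a sketch, assuming BUF points to a block of
   CIEs and FDEs laid out as in .eh_frame and terminated by a zero length
   word):

     __register_frame (buf);
     ...
     __deregister_frame (buf);

   The struct object used for bookkeeping is malloc'd above, so such a
   caller does not manage it itself.  */
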
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */
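/* As the loop in init_object below shows, the table is expected to be a
   NULL-terminated array of pointers, each pointing at the frame info for
   one translation unit.  */
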
void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((uword *) begin == 0 || *(uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}

/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}

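/* For example, an encoding byte of DW_EH_PE_datarel | DW_EH_PE_sdata4
   (0x30 | 0x0b == 0x3b) selects ob->dbase above, and the 4-byte signed
   value read later is interpreted relative to that base.  */
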
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */
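/* Illustrative note: GCC commonly emits CIE augmentation strings such as
   "zR" or "zPLR".  With "zPLR", the loop below skips the personality
   encoding and pointer for 'P' and the LSDA encoding byte for 'L', then
   returns the FDE pointer encoding byte when it reaches 'R'.  */
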
static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);          /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);          /* Skip data alignment.  */
  if (cie->version == 1)                /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                                /* Skip 'z' */
  p = read_uleb128 (p, &utmp);          /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}

/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr = *(_Unwind_Ptr *) x->pc_begin;
  _Unwind_Ptr y_ptr = *(_Unwind_Ptr *) y->pc_begin;

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);

/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
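/* Worked example (illustrative): on the sample sequence above, fde_split
   keeps the monotone subsequence
     101 102 103 105 108 110 111 115 119 125 126 129 130
   in LINEAR and diverts 127 128 190 160 to ERRATIC; frame_heapsort then
   orders the erratic entries (127 128 160 190) and fde_merge folds them
   back into LINEAR, yielding one fully sorted array.  */
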
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
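/* Concretely: during the scan, erratic->array[i] temporarily holds either a
   link (a const fde ** cast to const fde *) to the previous member of the
   candidate linear chain, or NULL once entry i has been evicted from the
   chain.  The final pass below then treats non-NULL as "linear" and NULL as
   "erratic".  */
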
static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde **chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde **probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde **) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */
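/* The merge below works backwards, shifting V1 entries up to make room, so
   it needs no scratch space: V1 was allocated for the full FDE count in
   start_fde_sort, so it has room for V1->count + V2->count entries.
   E.g. (illustrative) merging V1 = {101 105 110} with V2 = {103 107}
   leaves V1 = {101 103 105 107 110}.  */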
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}

/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
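      /* For instance (illustrative): with a 4-byte encoding such as
         DW_EH_PE_sdata4 on a 64-bit target, size_of_encoded_value returns 4,
         so the mask below becomes (1L << 32) - 1 == 0xffffffff and only the
         low 32 bits of pc_begin are tested against zero.  */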
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          if (*(_Unwind_Ptr *) this_fde->pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          pc_begin = ((_Unwind_Ptr *) this_fde->pc_begin)[0];
          pc_range = ((_Unwind_Ptr *) this_fde->pc_begin)[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;

      pc_begin = ((void **) f->pc_begin)[0];
      pc_range = ((uaddr *) f->pc_begin)[1];

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}

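/* Illustrative note: a caller looking up unwind data for a code address
   uses this entry point roughly as follows, decoding the rest of the FDE
   with the returned bases:

     struct dwarf_eh_bases bases;
     const fde *f = _Unwind_Find_FDE (addr, &bases);
     if (f)
       ... use bases.tbase / bases.dbase / bases.func ...

   For a return address, callers conventionally look up the address one byte
   before it, so that a call as the last insn of a function still maps to
   the calling function's FDE.  */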