/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008,
   2009 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#ifndef _Unwind_Find_FDE
#include "tconfig.h"
#include "tsystem.h"
#include "coretypes.h"
#include "tm.h"
#include "dwarf2.h"
#include "unwind.h"
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"
#endif

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif
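
/* On targets without a static mutex initializer, every entry point below
   calls init_object_mutex_once before taking object_mutex, so the mutex
   is initialized exactly once via __gthread_once.  */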

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (const void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
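
/* Note: besides crtbegin.o, __register_frame is also the entry point
   commonly used by JIT compilers to make unwind information for freshly
   generated code visible to the unwinder.  A minimal sketch, with a
   hypothetical helper that produces a zero-terminated .eh_frame blob:

       void *eh_frame = emit_eh_frame_for_jitted_code ();
       __register_frame (eh_frame);
       ...
       __deregister_frame (eh_frame);

   The buffer must remain valid until it is deregistered.  */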

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  gcc_assert (ob);
  return (void *) ob;
}

void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}

/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      gcc_unreachable ();
    }
}

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */
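/* For instance, with the common augmentation string "zPLR" the
   augmentation data following the length consists of: the personality
   encoding byte and the encoded personality pointer (for 'P'), the LSDA
   encoding byte (for 'L'), and the FDE pointer encoding byte (for 'R'),
   which is what this routine returns.  */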

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen ((const char *)aug) + 1;  /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);               /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);               /* Skip data alignment.  */
  if (cie->version == 1)                     /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                                     /* Skip 'z' */
  p = read_uleb128 (p, &utmp);               /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}

static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}


/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       const fde *x, const fde *y)
{
  _Unwind_Ptr x_ptr, y_ptr;
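  /* The pc_begin field inside an FDE is not necessarily aligned for an
     _Unwind_Ptr access, so extract the address with memcpy.  */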
  memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr));
  memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr));

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}

typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
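/* For the sample sequence above, fde_split below leaves
   101 102 103 105 108 110 111 115 119 125 126 129 130 in LINEAR and
   moves 127 128 190 160 to ERRATIC; the erratic entries are then
   heap-sorted and merged back in.  */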

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   The longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, so we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const*) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.  */
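/* The children of a[i] are a[2*i+1] and a[2*i+2]; at each step the root is
   compared with its larger child and swapped downward until the heap
   property holds within a[lo..hi).  */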

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
}
#undef SWAP

/* Merge V1 and V2, both sorted, and put the result into V1.  */
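/* The merge runs from the highest indices downward, reading both inputs
   from the back and writing into V1 from the back, so no not-yet-read
   entry of V1 is overwritten and the merge can be done in place.  */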
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}


/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
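      /* For example, a 4-byte encoding on a 64-bit target gives
         mask == 0xffffffff, so only the low 32 bits are tested.  */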
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          _Unwind_Ptr ptr;
          memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
          if (ptr == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */
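/* The work proceeds in three passes: classify_object_over_fdes counts the
   FDEs and settles the object's encoding, add_fdes collects pointers to the
   live FDEs into the accumulator, and end_fde_sort splits, heap-sorts and
   merges them into one sorted vector.  */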

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
          pc_begin = pc_array[0];
          pc_range = pc_array[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
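  /* Because the objects do not overlap, the first object whose pc_begin
     is not above PC is the only possible match, hence the break below
     after probing it.  */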
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}