/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "tconfig.h"
#include "tsystem.h"
#include "dwarf2.h"
#include "unwind.h"
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#include "gthr.h"

/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif

/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info (void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}

void
__register_frame (void *begin)
{
  struct object *ob = (struct object *) malloc (sizeof (struct object));
  __register_frame_info (begin, ob);
}
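
/* A minimal registration sketch (illustrative only; the real callers are
   the crtbegin.o/crtend.o objects built from crtstuff.c, and the symbol
   name below merely follows that convention):

     static struct object frame_object;
     extern char __EH_FRAME_BEGIN__[];

     ... at startup ...
     __register_frame_info (__EH_FRAME_BEGIN__, &frame_object);
     ... at shutdown ...
     __deregister_frame_info (__EH_FRAME_BEGIN__);

   The struct object must stay live for as long as the registration is in
   effect; crtbegin.o keeps it in static storage.  */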

/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}

void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}

void
__register_frame_table (void *begin)
{
  struct object *ob = (struct object *) malloc (sizeof (struct object));
  __register_frame_info_table (begin, ob);
}

/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll abort.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (void *begin)
{
  struct object **p;
  struct object *ob = 0;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

  __gthread_mutex_unlock (&object_mutex);
  abort ();

 out:
  __gthread_mutex_unlock (&object_mutex);
  return (void *) ob;
}

void *
__deregister_frame_info (void *begin)
{
  return __deregister_frame_info_bases (begin);
}

void
__deregister_frame (void *begin)
{
  free (__deregister_frame_info (begin));
}

/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    }
  abort ();
}

/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */
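/* For a typical GCC-emitted CIE the augmentation is "zR" or "zPLR":
   after the 'z' and the augmentation length are skipped, each 'P'
   consumes one encoding byte plus the encoded personality pointer,
   each 'L' consumes the one-byte LSDA encoding, and when 'R' is seen
   the byte at P is the FDE pointer encoding that we return.  */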

static int
get_cie_encoding (struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  p = aug + strlen (aug) + 1;           /* Skip the augmentation string.  */
  p = read_uleb128 (p, &dummy);         /* Skip code alignment.  */
  p = read_sleb128 (p, &dummy);         /* Skip data alignment.  */
  p++;                                  /* Skip return address column.  */

  aug++;                                /* Skip 'z' */
  p = read_uleb128 (p, &dummy);         /* Skip augmentation length.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}

static inline int
get_fde_encoding (struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}

/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time. But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

static saddr
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       fde *x, fde *y)
{
  return *(saddr *)x->pc_begin - *(saddr *)y->pc_begin;
}

static saddr
fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  return x_ptr - y_ptr;
}

static saddr
fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  return x_ptr - y_ptr;
}

typedef saddr (*fde_compare_t) (struct object *, fde *, fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur. They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (fde *) * count;
  if ((accu->linear = (struct fde_vector *) malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = (struct fde_vector *) malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}

static inline void
fde_insert (struct fde_accumulator *accu, fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}

/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
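/* For instance, given pc_begin values 10 11 30 12 13, the chain grows
   10, 10-11, 10-11-30; when 12 arrives, 30 is popped off the chain (its
   slot in ERRATIC is reset to NULL) and 12 is appended, then 13.  The
   final pass therefore yields LINEAR = {10 11 12 13} and ERRATIC = {30}.  */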

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static fde *marker;
  size_t count = linear->count;
  fde **chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  if (sizeof (fde *) != sizeof (fde **))
    abort ();

  for (i = 0; i < count; i++)
    {
      fde **probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (fde **) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}

/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
#define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
  size_t n = erratic->count;
  size_t m = n;
  size_t i;

  while (m > 0)
    {
      /* Invariant: a[m..n-1] is a heap.  */
      m--;
      for (i = m; 2*i+1 < n; )
        {
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+2]);
              i = 2*i+2;
            }
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+1]);
              i = 2*i+1;
            }
          else
            break;
        }
    }
  while (n > 1)
    {
      /* Invariant: a[0..n-1] is a heap.  */
      n--;
      SWAP (a[0], a[n]);
      for (i = 0; 2*i+1 < n; )
        {
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+2]);
              i = 2*i+2;
            }
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+1]);
              i = 2*i+1;
            }
          else
            break;
        }
    }
#undef SWAP
}

/* Merge V1 and V2, both sorted, and put the result into V1.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do {
        i2--;
        fde2 = v2->array[i2];
        while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
          {
            v1->array[i1+i2] = v1->array[i1-1];
            i1--;
          }
        v1->array[i1+i2] = fde2;
      } while (i2 > 0);
      v1->count += v2->count;
    }
}
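
/* Sort the accumulated FDE vector for OB: split it into the already
   ordered linear part and the erratic part, heap-sort the (hopefully
   small) erratic vector, then merge it back into the linear one.  If no
   erratic vector could be allocated, fall back to heap-sorting the whole
   linear vector.  */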

static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  if (accu->linear && accu->linear->count != count)
    abort ();

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      if (accu->linear->count + accu->erratic->count != count)
        abort ();
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}

/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; this_fde->length != 0; this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
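      /* For example, a 2-byte encoding such as DW_EH_PE_udata2 has
         size_of_encoded_value == 2, so the mask computed below is
         (1L << 16) - 1 == 0xffff.  */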
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *)pc_begin < ob->pc_begin)
        ob->pc_begin = (void *)pc_begin;
    }

  return count;
}

static void
add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; this_fde->length != 0; this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          if (*(_Unwind_Ptr *)this_fde->pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}

/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}

/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static fde *
linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; this_fde->length != 0; this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          pc_begin = ((_Unwind_Ptr *)this_fde->pc_begin)[0];
          pc_range = ((_Unwind_Ptr *)this_fde->pc_begin)[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr)pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}

/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

static inline fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;

      pc_begin = ((void **)f->pc_begin)[0];
      pc_range = ((uaddr *)f->pc_begin)[1];

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr)pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr)pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static inline fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr)pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr)pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}

static fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}

fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, (_Unwind_Ptr *)&bases->func);
    }

  return f;
}
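
/* A caller sketch (illustrative; within libgcc the caller is
   uw_frame_state_for in unwind-dw2.c):

     struct dwarf_eh_bases bases;
     fde *f = _Unwind_Find_FDE (ra, &bases);
     if (f == NULL)
       ... no unwind info covers RA ...

   On success, BASES carries the text and data bases of the matching
   object and the start address of the function containing PC, for use
   when decoding the FDE.  */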