/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2014 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
22 # include <shlib-compat.h>
25 #if !defined _LIBC || SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2_5)
30 #include <bits/libc-lock.h>
33 #define NO_BASE_OF_ENCODED_VALUE
34 #include <unwind-pe.h>
35 #include <unwind-dw2-fde.h>
37 #ifndef _Unwind_Find_FDE
42 #define NO_BASE_OF_ENCODED_VALUE
43 #include "unwind-pe.h"
44 #include "unwind-dw2-fde.h"
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
58 __libc_lock_define_initialized (static, object_mutex
)
59 #define init_object_mutex_once()
60 #define __gthread_mutex_lock(m) __libc_lock_lock (*(m))
61 #define __gthread_mutex_unlock(m) __libc_lock_unlock (*(m))
63 void __register_frame_info_bases_internal (void *begin
, struct object
*ob
,
64 void *tbase
, void *dbase
);
65 void __register_frame_info_table_bases_internal (void *begin
,
67 void *tbase
, void *dbase
);
68 void *__deregister_frame_info_bases_internal (void *begin
);
72 #ifdef __GTHREAD_MUTEX_INIT
73 static __gthread_mutex_t object_mutex
= __GTHREAD_MUTEX_INIT
;
75 static __gthread_mutex_t object_mutex
;
78 #ifdef __GTHREAD_MUTEX_INIT_FUNCTION
80 init_object_mutex (void)
82 __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex
);
86 init_object_mutex_once (void)
88 static __gthread_once_t once
= __GTHREAD_ONCE_INIT
;
89 __gthread_once (&once
, init_object_mutex
);
92 #define init_object_mutex_once()
97 /* Called from crtbegin.o to register the unwind info for an object. */
100 __register_frame_info_bases (void *begin
, struct object
*ob
,
101 void *tbase
, void *dbase
)
103 /* If .eh_frame is empty, don't register at all. */
104 if (*(uword
*) begin
== 0)
107 ob
->pc_begin
= (void *)-1;
110 ob
->u
.single
= begin
;
112 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
113 #ifdef DWARF2_OBJECT_END_PTR_EXTENSION
117 init_object_mutex_once ();
118 __gthread_mutex_lock (&object_mutex
);
120 ob
->next
= unseen_objects
;
123 __gthread_mutex_unlock (&object_mutex
);
125 INTDEF(__register_frame_info_bases
)
128 __register_frame_info (void *begin
, struct object
*ob
)
130 INTUSE(__register_frame_info_bases
) (begin
, ob
, 0, 0);
134 __register_frame (void *begin
)
138 /* If .eh_frame is empty, don't register at all. */
139 if (*(uword
*) begin
== 0)
142 ob
= (struct object
*) malloc (sizeof (struct object
));
143 INTUSE(__register_frame_info_bases
) (begin
, ob
, 0, 0);
146 /* Similar, but BEGIN is actually a pointer to a table of unwind entries
147 for different translation units. Called from the file generated by
151 __register_frame_info_table_bases (void *begin
, struct object
*ob
,
152 void *tbase
, void *dbase
)
154 ob
->pc_begin
= (void *)-1;
159 ob
->s
.b
.from_array
= 1;
160 ob
->s
.b
.encoding
= DW_EH_PE_omit
;
162 init_object_mutex_once ();
163 __gthread_mutex_lock (&object_mutex
);
165 ob
->next
= unseen_objects
;
168 __gthread_mutex_unlock (&object_mutex
);
170 INTDEF(__register_frame_info_table_bases
)
173 __register_frame_info_table (void *begin
, struct object
*ob
)
175 INTUSE(__register_frame_info_table_bases
) (begin
, ob
, 0, 0);
179 __register_frame_table (void *begin
)
181 struct object
*ob
= (struct object
*) malloc (sizeof (struct object
));
182 INTUSE(__register_frame_info_table_bases
) (begin
, ob
, 0, 0);
185 /* Called from crtbegin.o to deregister the unwind info for an object. */
186 /* ??? Glibc has for a while now exported __register_frame_info and
187 __deregister_frame_info. If we call __register_frame_info_bases
188 from crtbegin (wherein it is declared weak), and this object does
189 not get pulled from libgcc.a for other reasons, then the
190 invocation of __deregister_frame_info will be resolved from glibc.
191 Since the registration did not happen there, we'll abort.
193 Therefore, declare a new deregistration entry point that does the
194 exact same thing, but will resolve to the same library as
195 implements __register_frame_info_bases. */
198 __deregister_frame_info_bases (void *begin
)
201 struct object
*ob
= 0;
203 /* If .eh_frame is empty, we haven't registered. */
204 if (*(uword
*) begin
== 0)
207 init_object_mutex_once ();
208 __gthread_mutex_lock (&object_mutex
);
210 for (p
= &unseen_objects
; *p
; p
= &(*p
)->next
)
211 if ((*p
)->u
.single
== begin
)
218 for (p
= &seen_objects
; *p
; p
= &(*p
)->next
)
219 if ((*p
)->s
.b
.sorted
)
221 if ((*p
)->u
.sort
->orig_data
== begin
)
231 if ((*p
)->u
.single
== begin
)
239 __gthread_mutex_unlock (&object_mutex
);
243 __gthread_mutex_unlock (&object_mutex
);
246 INTDEF(__deregister_frame_info_bases
)
249 __deregister_frame_info (void *begin
)
251 return INTUSE(__deregister_frame_info_bases
) (begin
);
255 __deregister_frame (void *begin
)
257 /* If .eh_frame is empty, we haven't registered. */
258 if (*(uword
*) begin
!= 0)
259 free (INTUSE(__deregister_frame_info_bases
) (begin
));
263 /* Like base_of_encoded_value, but take the base from a struct object
264 instead of an _Unwind_Context. */
267 base_from_object (unsigned char encoding
, struct object
*ob
)
269 if (encoding
== DW_EH_PE_omit
)
272 switch (encoding
& 0x70)
274 case DW_EH_PE_absptr
:
276 case DW_EH_PE_aligned
:
279 case DW_EH_PE_textrel
:
280 return (_Unwind_Ptr
) ob
->tbase
;
281 case DW_EH_PE_datarel
:
282 return (_Unwind_Ptr
) ob
->dbase
;
287 /* Return the FDE pointer encoding from the CIE. */
288 /* ??? This is a subset of extract_cie_info from unwind-dw2.c. */
291 get_cie_encoding (struct dwarf_cie
*cie
)
293 const unsigned char *aug
, *p
;
298 aug
= cie
->augmentation
;
300 return DW_EH_PE_absptr
;
302 /* Skip the augmentation string. */
303 p
= aug
+ strlen ((const char *) aug
) + 1;
304 p
= read_uleb128 (p
, &utmp
); /* Skip code alignment. */
305 p
= read_sleb128 (p
, &stmp
); /* Skip data alignment. */
306 p
++; /* Skip return address column. */
308 aug
++; /* Skip 'z' */
309 p
= read_uleb128 (p
, &utmp
); /* Skip augmentation length. */
312 /* This is what we're looking for. */
315 /* Personality encoding and pointer. */
316 else if (*aug
== 'P')
318 /* ??? Avoid dereferencing indirect pointers, since we're
319 faking the base address. Gotta keep DW_EH_PE_aligned
321 p
= read_encoded_value_with_base (*p
& 0x7F, 0, p
+ 1, &dummy
);
324 else if (*aug
== 'L')
326 /* Otherwise end of string, or unknown augmentation. */
328 return DW_EH_PE_absptr
;
/* Return the pointer encoding for FDE F, taken from its CIE.  */
static int
get_fde_encoding (struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
340 /* Sorting an array of FDEs by address.
341 (Ideally we would have the linker sort the FDEs so we don't have to do
342 it at run time. But the linkers are not yet prepared for this.) */
344 /* Return the Nth pc_begin value from FDE x. */
346 static inline _Unwind_Ptr
347 get_pc_begin (fde
*x
, size_t n
)
350 memcpy (&p
, x
->pc_begin
+ n
* sizeof (_Unwind_Ptr
), sizeof (_Unwind_Ptr
));
354 /* Comparison routines. Three variants of increasing complexity. */
357 fde_unencoded_compare (struct object
*ob
__attribute__((unused
)),
360 _Unwind_Ptr x_ptr
= get_pc_begin (x
, 0);
361 _Unwind_Ptr y_ptr
= get_pc_begin (y
, 0);
371 fde_single_encoding_compare (struct object
*ob
, fde
*x
, fde
*y
)
373 _Unwind_Ptr base
, x_ptr
, y_ptr
;
375 base
= base_from_object (ob
->s
.b
.encoding
, ob
);
376 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, x
->pc_begin
, &x_ptr
);
377 read_encoded_value_with_base (ob
->s
.b
.encoding
, base
, y
->pc_begin
, &y_ptr
);
387 fde_mixed_encoding_compare (struct object
*ob
, fde
*x
, fde
*y
)
389 int x_encoding
, y_encoding
;
390 _Unwind_Ptr x_ptr
, y_ptr
;
392 x_encoding
= get_fde_encoding (x
);
393 read_encoded_value_with_base (x_encoding
, base_from_object (x_encoding
, ob
),
394 x
->pc_begin
, &x_ptr
);
396 y_encoding
= get_fde_encoding (y
);
397 read_encoded_value_with_base (y_encoding
, base_from_object (y_encoding
, ob
),
398 y
->pc_begin
, &y_ptr
);
407 typedef int (*fde_compare_t
) (struct object
*, fde
*, fde
*);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

struct fde_accumulator
{
  struct fde_vector *linear;	/* The mostly-sorted bulk of the FDEs.  */
  struct fde_vector *erratic;	/* The out-of-order stragglers.  */
};
428 start_fde_sort (struct fde_accumulator
*accu
, size_t count
)
434 size
= sizeof (struct fde_vector
) + sizeof (fde
*) * count
;
435 if ((accu
->linear
= (struct fde_vector
*) malloc (size
)))
437 accu
->linear
->count
= 0;
438 if ((accu
->erratic
= (struct fde_vector
*) malloc (size
)))
439 accu
->erratic
->count
= 0;
447 fde_insert (struct fde_accumulator
*accu
, fde
*this_fde
)
450 accu
->linear
->array
[accu
->linear
->count
++] = this_fde
;
453 /* Split LINEAR into a linear sequence with low values and an erratic
454 sequence with high values, put the linear one (of longest possible
455 length) into LINEAR and the erratic one into ERRATIC. This is O(N).
457 Because the longest linear sequence we are trying to locate within the
458 incoming LINEAR array can be interspersed with (high valued) erratic
459 entries. We construct a chain indicating the sequenced entries.
460 To avoid having to allocate this chain, we overlay it onto the space of
461 the ERRATIC array during construction. A final pass iterates over the
462 chain to determine what should be placed in the ERRATIC array, and
463 what is the linear sequence. This overlay is safe from aliasing. */
466 fde_split (struct object
*ob
, fde_compare_t fde_compare
,
467 struct fde_vector
*linear
, struct fde_vector
*erratic
)
470 size_t count
= linear
->count
;
471 fde
**chain_end
= &marker
;
474 /* This should optimize out, but it is wise to make sure this assumption
475 is correct. Should these have different sizes, we cannot cast between
476 them and the overlaying onto ERRATIC will not work. */
477 if (sizeof (fde
*) != sizeof (fde
**))
480 for (i
= 0; i
< count
; i
++)
484 for (probe
= chain_end
;
485 probe
!= &marker
&& fde_compare (ob
, linear
->array
[i
], *probe
) < 0;
488 chain_end
= (fde
**) erratic
->array
[probe
- linear
->array
];
489 erratic
->array
[probe
- linear
->array
] = NULL
;
491 erratic
->array
[i
] = (fde
*) chain_end
;
492 chain_end
= &linear
->array
[i
];
495 /* Each entry in LINEAR which is part of the linear sequence we have
496 discovered will correspond to a non-NULL entry in the chain we built in
497 the ERRATIC array. */
498 for (i
= j
= k
= 0; i
< count
; i
++)
499 if (erratic
->array
[i
])
500 linear
->array
[j
++] = linear
->array
[i
];
502 erratic
->array
[k
++] = linear
->array
[i
];
507 /* This is O(n log(n)). BSD/OS defines heapsort in stdlib.h, so we must
508 use a name that does not conflict. */
511 frame_heapsort (struct object
*ob
, fde_compare_t fde_compare
,
512 struct fde_vector
*erratic
)
514 /* For a description of this algorithm, see:
515 Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
517 fde
** a
= erratic
->array
;
518 /* A portion of the array is called a "heap" if for all i>=0:
519 If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
520 If i and 2i+2 are valid indices, then a[i] >= a[2i+2]. */
521 #define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
522 size_t n
= erratic
->count
;
528 /* Invariant: a[m..n-1] is a heap. */
530 for (i
= m
; 2*i
+1 < n
; )
533 && fde_compare (ob
, a
[2*i
+2], a
[2*i
+1]) > 0
534 && fde_compare (ob
, a
[2*i
+2], a
[i
]) > 0)
536 SWAP (a
[i
], a
[2*i
+2]);
539 else if (fde_compare (ob
, a
[2*i
+1], a
[i
]) > 0)
541 SWAP (a
[i
], a
[2*i
+1]);
550 /* Invariant: a[0..n-1] is a heap. */
553 for (i
= 0; 2*i
+1 < n
; )
556 && fde_compare (ob
, a
[2*i
+2], a
[2*i
+1]) > 0
557 && fde_compare (ob
, a
[2*i
+2], a
[i
]) > 0)
559 SWAP (a
[i
], a
[2*i
+2]);
562 else if (fde_compare (ob
, a
[2*i
+1], a
[i
]) > 0)
564 SWAP (a
[i
], a
[2*i
+1]);
574 /* Merge V1 and V2, both sorted, and put the result into V1. */
576 fde_merge (struct object
*ob
, fde_compare_t fde_compare
,
577 struct fde_vector
*v1
, struct fde_vector
*v2
)
589 fde2
= v2
->array
[i2
];
590 while (i1
> 0 && fde_compare (ob
, v1
->array
[i1
-1], fde2
) > 0)
592 v1
->array
[i1
+i2
] = v1
->array
[i1
-1];
595 v1
->array
[i1
+i2
] = fde2
;
598 v1
->count
+= v2
->count
;
603 end_fde_sort (struct object
*ob
, struct fde_accumulator
*accu
, size_t count
)
605 fde_compare_t fde_compare
;
607 if (accu
->linear
->count
!= count
)
610 if (ob
->s
.b
.mixed_encoding
)
611 fde_compare
= fde_mixed_encoding_compare
;
612 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
613 fde_compare
= fde_unencoded_compare
;
615 fde_compare
= fde_single_encoding_compare
;
619 fde_split (ob
, fde_compare
, accu
->linear
, accu
->erratic
);
620 if (accu
->linear
->count
+ accu
->erratic
->count
!= count
)
622 frame_heapsort (ob
, fde_compare
, accu
->erratic
);
623 fde_merge (ob
, fde_compare
, accu
->linear
, accu
->erratic
);
624 free (accu
->erratic
);
628 /* We've not managed to malloc an erratic array,
629 so heap sort in the linear one. */
630 frame_heapsort (ob
, fde_compare
, accu
->linear
);
635 /* Update encoding, mixed_encoding, and pc_begin for OB for the
636 fde array beginning at THIS_FDE. Return the number of fdes
637 encountered along the way. */
640 classify_object_over_fdes (struct object
*ob
, fde
*this_fde
)
642 struct dwarf_cie
*last_cie
= 0;
644 int encoding
= DW_EH_PE_absptr
;
645 _Unwind_Ptr base
= 0;
647 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
649 struct dwarf_cie
*this_cie
;
650 _Unwind_Ptr mask
, pc_begin
;
653 if (this_fde
->CIE_delta
== 0)
656 /* Determine the encoding for this FDE. Note mixed encoded
657 objects for later. */
658 this_cie
= get_cie (this_fde
);
659 if (this_cie
!= last_cie
)
662 encoding
= get_cie_encoding (this_cie
);
663 base
= base_from_object (encoding
, ob
);
664 if (ob
->s
.b
.encoding
== DW_EH_PE_omit
)
665 ob
->s
.b
.encoding
= encoding
;
666 else if (ob
->s
.b
.encoding
!= encoding
)
667 ob
->s
.b
.mixed_encoding
= 1;
670 read_encoded_value_with_base (encoding
, base
, this_fde
->pc_begin
,
673 /* Take care to ignore link-once functions that were removed.
674 In these cases, the function address will be NULL, but if
675 the encoding is smaller than a pointer a true NULL may not
676 be representable. Assume 0 in the representable bits is NULL. */
677 mask
= size_of_encoded_value (encoding
);
678 if (mask
< sizeof (void *))
679 mask
= (1L << (mask
<< 3)) - 1;
683 if ((pc_begin
& mask
) == 0)
687 if ((void *) pc_begin
< ob
->pc_begin
)
688 ob
->pc_begin
= (void *) pc_begin
;
695 add_fdes (struct object
*ob
, struct fde_accumulator
*accu
, fde
*this_fde
)
697 struct dwarf_cie
*last_cie
= 0;
698 int encoding
= ob
->s
.b
.encoding
;
699 _Unwind_Ptr base
= base_from_object (ob
->s
.b
.encoding
, ob
);
701 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
703 struct dwarf_cie
*this_cie
;
706 if (this_fde
->CIE_delta
== 0)
709 if (ob
->s
.b
.mixed_encoding
)
711 /* Determine the encoding for this FDE. Note mixed encoded
712 objects for later. */
713 this_cie
= get_cie (this_fde
);
714 if (this_cie
!= last_cie
)
717 encoding
= get_cie_encoding (this_cie
);
718 base
= base_from_object (encoding
, ob
);
722 if (encoding
== DW_EH_PE_absptr
)
724 if (get_pc_begin (this_fde
, 0) == 0)
729 _Unwind_Ptr pc_begin
, mask
;
731 read_encoded_value_with_base (encoding
, base
, this_fde
->pc_begin
,
734 /* Take care to ignore link-once functions that were removed.
735 In these cases, the function address will be NULL, but if
736 the encoding is smaller than a pointer a true NULL may not
737 be representable. Assume 0 in the representable bits is NULL. */
738 mask
= size_of_encoded_value (encoding
);
739 if (mask
< sizeof (void *))
740 mask
= (1L << (mask
<< 3)) - 1;
744 if ((pc_begin
& mask
) == 0)
748 fde_insert (accu
, this_fde
);
752 /* Set up a sorted array of pointers to FDEs for a loaded object. We
753 count up the entries before allocating the array because it's likely to
754 be faster. We can be called multiple times, should we have failed to
755 allocate a sorted fde array on a previous occasion. */
758 init_object (struct object
* ob
)
760 struct fde_accumulator accu
;
763 count
= ob
->s
.b
.count
;
766 if (ob
->s
.b
.from_array
)
768 fde
**p
= ob
->u
.array
;
769 for (count
= 0; *p
; ++p
)
770 count
+= classify_object_over_fdes (ob
, *p
);
773 count
= classify_object_over_fdes (ob
, ob
->u
.single
);
775 /* The count field we have in the main struct object is somewhat
776 limited, but should suffice for virtually all cases. If the
777 counted value doesn't fit, re-write a zero. The worst that
778 happens is that we re-count next time -- admittedly non-trivial
779 in that this implies some 2M fdes, but at least we function. */
780 ob
->s
.b
.count
= count
;
781 if (ob
->s
.b
.count
!= count
)
785 if (!start_fde_sort (&accu
, count
))
788 if (ob
->s
.b
.from_array
)
791 for (p
= ob
->u
.array
; *p
; ++p
)
792 add_fdes (ob
, &accu
, *p
);
795 add_fdes (ob
, &accu
, ob
->u
.single
);
797 end_fde_sort (ob
, &accu
, count
);
799 /* Save the original fde pointer, since this is the key by which the
800 DSO will deregister the object. */
801 accu
.linear
->orig_data
= ob
->u
.single
;
802 ob
->u
.sort
= accu
.linear
;
807 /* A linear search through a set of FDEs for the given PC. This is
808 used when there was insufficient memory to allocate and sort an
812 linear_search_fdes (struct object
*ob
, fde
*this_fde
, void *pc
)
814 struct dwarf_cie
*last_cie
= 0;
815 int encoding
= ob
->s
.b
.encoding
;
816 _Unwind_Ptr base
= base_from_object (ob
->s
.b
.encoding
, ob
);
818 for (; ! last_fde (ob
, this_fde
); this_fde
= next_fde (this_fde
))
820 struct dwarf_cie
*this_cie
;
821 _Unwind_Ptr pc_begin
, pc_range
;
824 if (this_fde
->CIE_delta
== 0)
827 if (ob
->s
.b
.mixed_encoding
)
829 /* Determine the encoding for this FDE. Note mixed encoded
830 objects for later. */
831 this_cie
= get_cie (this_fde
);
832 if (this_cie
!= last_cie
)
835 encoding
= get_cie_encoding (this_cie
);
836 base
= base_from_object (encoding
, ob
);
840 if (encoding
== DW_EH_PE_absptr
)
842 pc_begin
= get_pc_begin (this_fde
, 0);
843 pc_range
= get_pc_begin (this_fde
, 1);
850 const unsigned char *p
;
852 p
= read_encoded_value_with_base (encoding
, base
,
853 this_fde
->pc_begin
, &pc_begin
);
854 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
856 /* Take care to ignore link-once functions that were removed.
857 In these cases, the function address will be NULL, but if
858 the encoding is smaller than a pointer a true NULL may not
859 be representable. Assume 0 in the representable bits is NULL. */
860 mask
= size_of_encoded_value (encoding
);
861 if (mask
< sizeof (void *))
862 mask
= (1L << (mask
<< 3)) - 1;
866 if ((pc_begin
& mask
) == 0)
870 if ((_Unwind_Ptr
) pc
- pc_begin
< pc_range
)
877 /* Binary search for an FDE containing the given PC. Here are three
878 implementations of increasing complexity. */
881 binary_search_unencoded_fdes (struct object
*ob
, void *pc
)
883 struct fde_vector
*vec
= ob
->u
.sort
;
886 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
888 size_t i
= (lo
+ hi
) / 2;
889 fde
*f
= vec
->array
[i
];
893 pc_begin
= (void *) get_pc_begin (f
, 0);
894 pc_range
= (uaddr
) get_pc_begin (f
, 1);
898 else if (pc
>= pc_begin
+ pc_range
)
908 binary_search_single_encoding_fdes (struct object
*ob
, void *pc
)
910 struct fde_vector
*vec
= ob
->u
.sort
;
911 int encoding
= ob
->s
.b
.encoding
;
912 _Unwind_Ptr base
= base_from_object (encoding
, ob
);
915 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
917 size_t i
= (lo
+ hi
) / 2;
918 fde
*f
= vec
->array
[i
];
919 _Unwind_Ptr pc_begin
, pc_range
;
920 const unsigned char *p
;
922 p
= read_encoded_value_with_base (encoding
, base
, f
->pc_begin
,
924 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
926 if ((_Unwind_Ptr
) pc
< pc_begin
)
928 else if ((_Unwind_Ptr
) pc
>= pc_begin
+ pc_range
)
938 binary_search_mixed_encoding_fdes (struct object
*ob
, void *pc
)
940 struct fde_vector
*vec
= ob
->u
.sort
;
943 for (lo
= 0, hi
= vec
->count
; lo
< hi
; )
945 size_t i
= (lo
+ hi
) / 2;
946 fde
*f
= vec
->array
[i
];
947 _Unwind_Ptr pc_begin
, pc_range
;
948 const unsigned char *p
;
951 encoding
= get_fde_encoding (f
);
952 p
= read_encoded_value_with_base (encoding
,
953 base_from_object (encoding
, ob
),
954 f
->pc_begin
, &pc_begin
);
955 read_encoded_value_with_base (encoding
& 0x0F, 0, p
, &pc_range
);
957 if ((_Unwind_Ptr
) pc
< pc_begin
)
959 else if ((_Unwind_Ptr
) pc
>= pc_begin
+ pc_range
)
969 search_object (struct object
* ob
, void *pc
)
971 /* If the data hasn't been sorted, try to do this now. We may have
972 more memory available than last time we tried. */
973 if (! ob
->s
.b
.sorted
)
977 /* Despite the above comment, the normal reason to get here is
978 that we've not processed this object before. A quick range
979 check is in order. */
980 if (pc
< ob
->pc_begin
)
986 if (ob
->s
.b
.mixed_encoding
)
987 return binary_search_mixed_encoding_fdes (ob
, pc
);
988 else if (ob
->s
.b
.encoding
== DW_EH_PE_absptr
)
989 return binary_search_unencoded_fdes (ob
, pc
);
991 return binary_search_single_encoding_fdes (ob
, pc
);
995 /* Long slow labourious linear search, cos we've no memory. */
996 if (ob
->s
.b
.from_array
)
999 for (p
= ob
->u
.array
; *p
; p
++)
1001 fde
*f
= linear_search_fdes (ob
, *p
, pc
);
1008 return linear_search_fdes (ob
, ob
->u
.single
, pc
);
1013 _Unwind_Find_FDE (void *pc
, struct dwarf_eh_bases
*bases
)
1018 init_object_mutex_once ();
1019 __gthread_mutex_lock (&object_mutex
);
1021 /* Linear search through the classified objects, to find the one
1022 containing the pc. Note that pc_begin is sorted descending, and
1023 we expect objects to be non-overlapping. */
1024 for (ob
= seen_objects
; ob
; ob
= ob
->next
)
1025 if (pc
>= ob
->pc_begin
)
1027 f
= search_object (ob
, pc
);
1033 /* Classify and search the objects we've not yet processed. */
1034 while ((ob
= unseen_objects
))
1038 unseen_objects
= ob
->next
;
1039 f
= search_object (ob
, pc
);
1041 /* Insert the object into the classified list. */
1042 for (p
= &seen_objects
; *p
; p
= &(*p
)->next
)
1043 if ((*p
)->pc_begin
< ob
->pc_begin
)
1053 __gthread_mutex_unlock (&object_mutex
);
1060 bases
->tbase
= ob
->tbase
;
1061 bases
->dbase
= ob
->dbase
;
1063 encoding
= ob
->s
.b
.encoding
;
1064 if (ob
->s
.b
.mixed_encoding
)
1065 encoding
= get_fde_encoding (f
);
1066 read_encoded_value_with_base (encoding
, base_from_object (encoding
, ob
),
1067 f
->pc_begin
, &func
);
1068 bases
->func
= (void *) func
;