/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
# include <shlib-compat.h>

#if !defined _LIBC || SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2_5)

#include <bits/libc-lock.h>

#define NO_BASE_OF_ENCODED_VALUE
#include <unwind-pe.h>
#include <unwind-dw2-fde.h>

#ifndef _Unwind_Find_FDE

#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
__libc_lock_define_initialized (static, object_mutex)

#define init_object_mutex_once()
#define __gthread_mutex_lock(m) __libc_lock_lock (*(m))
#define __gthread_mutex_unlock(m) __libc_lock_unlock (*(m))
void __register_frame_info_bases_internal (void *begin, struct object *ob,
                                           void *tbase, void *dbase);
void __register_frame_info_table_bases_internal (void *begin, struct object *ob,
                                                 void *tbase, void *dbase);
void *__deregister_frame_info_bases_internal (void *begin);
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION

init_object_mutex (void)
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);

init_object_mutex_once (void)
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);

#define init_object_mutex_once()
/* Called from crtbegin.o to register the unwind info for an object.  */

__register_frame_info_bases (void *begin, struct object *ob,
                             void *tbase, void *dbase)

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)

  ob->pc_begin = (void *)-1;

  ob->u.single = begin;

  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;

  __gthread_mutex_unlock (&object_mutex);

INTDEF(__register_frame_info_bases)
__register_frame_info (void *begin, struct object *ob)
  INTUSE(__register_frame_info_bases) (begin, ob, 0, 0);

__register_frame (void *begin)

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)

  ob = (struct object *) malloc (sizeof (struct object));
  INTUSE(__register_frame_info_bases) (begin, ob, 0, 0);
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   crtbegin.  */

__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)

  ob->pc_begin = (void *)-1;

  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;

  __gthread_mutex_unlock (&object_mutex);

INTDEF(__register_frame_info_table_bases)
__register_frame_info_table (void *begin, struct object *ob)
  INTUSE(__register_frame_info_table_bases) (begin, ob, 0, 0);

__register_frame_table (void *begin)
  struct object *ob = (struct object *) malloc (sizeof (struct object));
  INTUSE(__register_frame_info_table_bases) (begin, ob, 0, 0);
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll abort.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

__deregister_frame_info_bases (void *begin)

  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin == 0)

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p; p = &(*p)->next)
    if ((*p)->u.single == begin)

  for (p = &seen_objects; *p; p = &(*p)->next)
    if ((*p)->s.b.sorted)
        if ((*p)->u.sort->orig_data == begin)

        if ((*p)->u.single == begin)

  __gthread_mutex_unlock (&object_mutex);

  __gthread_mutex_unlock (&object_mutex);

INTDEF(__deregister_frame_info_bases)
__deregister_frame_info (void *begin)
  return INTUSE(__deregister_frame_info_bases) (begin);

__deregister_frame (void *begin)
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (INTUSE(__deregister_frame_info_bases) (begin));
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

base_from_object (unsigned char encoding, struct object *ob)
  if (encoding == DW_EH_PE_omit)

  switch (encoding & 0x70)
    case DW_EH_PE_absptr:

    case DW_EH_PE_aligned:

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

get_cie_encoding (struct dwarf_cie *cie)
  const unsigned char *aug, *p;

  aug = cie->augmentation;

    return DW_EH_PE_absptr;

  /* Skip the augmentation string.  */
  p = aug + strlen ((const char *) aug) + 1;
  p = read_uleb128 (p, &utmp);          /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);          /* Skip data alignment.  */
  p++;                                  /* Skip return address column.  */

  aug++;                                /* Skip 'z' */
  p = read_uleb128 (p, &utmp);          /* Skip augmentation length.  */

      /* This is what we're looking for.  */

      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             aligned.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);

      else if (*aug == 'L')

      /* Otherwise end of string, or unknown augmentation.  */
        return DW_EH_PE_absptr;

get_fde_encoding (struct dwarf_fde *f)
  return get_cie_encoding (get_cie (f));
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Comparison routines.  Three variants of increasing complexity.  */

fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       fde *x, fde *y)
  _Unwind_Ptr x_ptr = *(_Unwind_Ptr *) x->pc_begin;
  _Unwind_Ptr y_ptr = *(_Unwind_Ptr *) y->pc_begin;
fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

typedef int (*fde_compare_t) (struct object *, fde *, fde *);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
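
/* Illustrative sketch only, not part of this file and not compiled
   (hence the #if 0): the same "evict whatever breaks the increasing run"
   idea that fde_split below applies to encoded FDE pointers, shown here
   on plain integers.  The helper name split_increasing is hypothetical.
   For the example sequence above, the kept run would be
   101 102 103 105 108 110 111 115 119 125 126 129 130, and the values
   127, 128, 190 and 160 would be set aside; the real code then
   heap-sorts the erratic part and merges it back (frame_heapsort and
   fde_merge further down).  */
#if 0
static size_t
split_increasing (const int *in, size_t n, int *kept, int *evicted,
                  size_t *n_evicted)
{
  size_t i, nk = 0, ne = 0;

  for (i = 0; i < n; ++i)
    {
      /* Move previously kept values that would no longer be in
         increasing order once IN[i] is appended over to EVICTED.  */
      while (nk > 0 && kept[nk - 1] > in[i])
        evicted[ne++] = kept[--nk];
      kept[nk++] = in[i];
    }
  *n_evicted = ne;
  return nk;
}
#endif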
struct fde_accumulator
  struct fde_vector *linear;
  struct fde_vector *erratic;

start_fde_sort (struct fde_accumulator *accu, size_t count)
  size = sizeof (struct fde_vector) + sizeof (fde *) * count;
  if ((accu->linear = (struct fde_vector *) malloc (size)))
      accu->linear->count = 0;
      if ((accu->erratic = (struct fde_vector *) malloc (size)))
        accu->erratic->count = 0;

fde_insert (struct fde_accumulator *accu, fde *this_fde)
    accu->linear->array[accu->linear->count++] = this_fde;
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)

  size_t count = linear->count;
  fde **chain_end = &marker;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  if (sizeof (fde *) != sizeof (fde **))

  for (i = 0; i < count; i++)

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;

          chain_end = (fde **) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;

      erratic->array[i] = (fde *) chain_end;
      chain_end = &linear->array[i];

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)

  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.  */
  fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
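  /* For instance (using integers in place of FDE pointers), with n == 5
     the array {9, 5, 7, 1, 3} is a heap: a[0] >= a[1], a[0] >= a[2],
     a[1] >= a[3] and a[1] >= a[4] all hold.  */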
#define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
  size_t n = erratic->count;

  /* Invariant: a[m..n-1] is a heap.  */
      for (i = m; 2*i+1 < n; )
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
              SWAP (a[i], a[2*i+2]);

          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
              SWAP (a[i], a[2*i+1]);

  /* Invariant: a[0..n-1] is a heap.  */
      for (i = 0; 2*i+1 < n; )
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
              SWAP (a[i], a[2*i+2]);

          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
              SWAP (a[i], a[2*i+1]);
/* Merge V1 and V2, both sorted, and put the result into V1.  */

fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)

          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
              v1->array[i1+i2] = v1->array[i1-1];

          v1->array[i1+i2] = fde2;

      v1->count += v2->count;
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)

  fde_compare_t fde_compare;

  if (accu->linear->count != count)

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      if (accu->linear->count + accu->erratic->count != count)

      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);

      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

classify_object_over_fdes (struct object *ob, fde *this_fde)

  struct dwarf_cie *last_cie = 0;

  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))

      struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      if (this_fde->CIE_delta == 0)

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)

          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
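      /* For example, a 4-byte encoding on an LP64 host gives
         mask == (1L << 32) - 1 == 0xffffffff, so only the low 32 bits
         of pc_begin are tested against zero below.  */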
      if ((pc_begin & mask) == 0)

      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)

  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))

      struct dwarf_cie *this_cie;

      if (this_fde->CIE_delta == 0)

      if (ob->s.b.mixed_encoding)

          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)

              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);

      if (encoding == DW_EH_PE_absptr)

          if (*(_Unwind_Ptr *) this_fde->pc_begin == 0)

          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;

          if ((pc_begin & mask) == 0)

      fde_insert (accu, this_fde);
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

init_object (struct object * ob)

  struct fde_accumulator accu;

  count = ob->s.b.count;

      if (ob->s.b.from_array)

          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)

  if (!start_fde_sort (&accu, count))

  if (ob->s.b.from_array)

      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

linear_search_fdes (struct object *ob, fde *this_fde, void *pc)

  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))

      struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      if (this_fde->CIE_delta == 0)

      if (ob->s.b.mixed_encoding)

          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)

              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);

      if (encoding == DW_EH_PE_absptr)

          pc_begin = ((_Unwind_Ptr *) this_fde->pc_begin)[0];
          pc_range = ((_Unwind_Ptr *) this_fde->pc_begin)[1];

          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;

          if ((pc_begin & mask) == 0)

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

binary_search_unencoded_fdes (struct object *ob, void *pc)

  struct fde_vector *vec = ob->u.sort;

  for (lo = 0, hi = vec->count; lo < hi; )

      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];

      pc_begin = ((void **) f->pc_begin)[0];
      pc_range = ((uaddr *) f->pc_begin)[1];

      else if (pc >= pc_begin + pc_range)
binary_search_single_encoding_fdes (struct object *ob, void *pc)

  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);

  for (lo = 0, hi = vec->count; lo < hi; )

      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,

      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)

      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)

  struct fde_vector *vec = ob->u.sort;

  for (lo = 0, hi = vec->count; lo < hi; )

      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)

      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
search_object (struct object * ob, void *pc)

  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)

  /* Despite the above comment, the normal reason to get here is
     that we've not processed this object before.  A quick range
     check is in order.  */
  if (pc < ob->pc_begin)

      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);

      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)

          for (p = ob->u.array; *p; p++)
              fde *f = linear_search_fdes (ob, *p, pc);
      else
        return linear_search_fdes (ob, ob->u.single, pc);
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)

        f = search_object (ob, pc);

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)

  __gthread_mutex_unlock (&object_mutex);

  bases->tbase = ob->tbase;
  bases->dbase = ob->dbase;

  encoding = ob->s.b.encoding;
  if (ob->s.b.mixed_encoding)
    encoding = get_fde_encoding (f);
  read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                f->pc_begin, &func);
  bases->func = (void *) func;