/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2024 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
# include <shlib-compat.h>

#if !defined _LIBC || SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2_5)

#include <libc-lock.h>

#define NO_BASE_OF_ENCODED_VALUE
#include <unwind-pe.h>
#include <unwind-dw2-fde.h>

#ifndef _Unwind_Find_FDE

#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

__libc_lock_define_initialized (static, object_mutex)
#define init_object_mutex_once()
#define __gthread_mutex_lock(m) __libc_lock_lock (*(m))
#define __gthread_mutex_unlock(m) __libc_lock_unlock (*(m))
void __register_frame_info_bases (void *begin, struct object *ob,
                                  void *tbase, void *dbase);
hidden_proto (__register_frame_info_bases)
void __register_frame_info_table_bases (void *begin, struct object *ob,
                                        void *tbase, void *dbase);
hidden_proto (__register_frame_info_table_bases)
void *__deregister_frame_info_bases (void *begin);
hidden_proto (__deregister_frame_info_bases)
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;

static __gthread_mutex_t object_mutex;

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
init_object_mutex (void)
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);

init_object_mutex_once (void)
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);

#define init_object_mutex_once()
/* Called from crtbegin.o to register the unwind info for an object.  */

__register_frame_info_bases (void *begin, struct object *ob,
                             void *tbase, void *dbase)
  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)

  ob->pc_begin = (void *)-1;

  ob->u.single = begin;

  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;

  __gthread_mutex_unlock (&object_mutex);

hidden_def (__register_frame_info_bases)
__register_frame_info (void *begin, struct object *ob)
  __register_frame_info_bases (begin, ob, 0, 0);

__register_frame (void *begin)
  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)

  ob = (struct object *) malloc (sizeof (struct object));
  __register_frame_info_bases (begin, ob, 0, 0);
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
  ob->pc_begin = (void *)-1;

  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;

  __gthread_mutex_unlock (&object_mutex);

hidden_def (__register_frame_info_table_bases)
__register_frame_info_table (void *begin, struct object *ob)
  __register_frame_info_table_bases (begin, ob, 0, 0);

__register_frame_table (void *begin)
  struct object *ob = (struct object *) malloc (sizeof (struct object));
  __register_frame_info_table_bases (begin, ob, 0, 0);
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll abort.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */
__deregister_frame_info_bases (void *begin)
  struct object *ob = 0;
  struct fde_vector *tofree = NULL;

  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin == 0)

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p; p = &(*p)->next)
    if ((*p)->u.single == begin)

  for (p = &seen_objects; *p; p = &(*p)->next)
    if ((*p)->s.b.sorted)

        if ((*p)->u.sort->orig_data == begin)

        if ((*p)->u.single == begin)

  __gthread_mutex_unlock (&object_mutex);

  __gthread_mutex_unlock (&object_mutex);

hidden_def (__deregister_frame_info_bases)
__deregister_frame_info (void *begin)
  return __deregister_frame_info_bases (begin);
__deregister_frame (void *begin)
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info_bases (begin));
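
/* A minimal usage sketch, assuming a hypothetical startup object: the symbol
   __my_eh_frame_start and the constructor/destructor attributes below are
   assumptions made only for illustration; real crtbegin.o/crtend.o code is
   generated differently.  */
#if 0
extern char __my_eh_frame_start[];	/* Hypothetical start of .eh_frame.  */

__attribute__ ((constructor)) static void
example_register_eh_frame (void)
{
  /* __register_frame allocates the struct object itself and keys the
     registration on the BEGIN pointer.  */
  __register_frame (__my_eh_frame_start);
}

__attribute__ ((destructor)) static void
example_deregister_eh_frame (void)
{
  /* Deregistration must pass the same BEGIN pointer; __deregister_frame
     frees the malloc'd object returned by __deregister_frame_info_bases.  */
  __deregister_frame (__my_eh_frame_start);
}
#endif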
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

base_from_object (unsigned char encoding, struct object *ob)
  if (encoding == DW_EH_PE_omit)

  switch (encoding & 0x70)

    case DW_EH_PE_absptr:

    case DW_EH_PE_aligned:

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
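
/* A minimal sketch of how the comparison and search routines below combine
   base_from_object with read_encoded_value_with_base, assuming the object
   uses a single, non-omit encoding; the helper name is hypothetical.  */
#if 0
static _Unwind_Ptr
example_decode_pc_begin (struct object *ob, fde *f)
{
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  _Unwind_Ptr pc;

  /* read_encoded_value_with_base returns a pointer past the bytes read;
     only the decoded address is of interest here.  */
  read_encoded_value_with_base (encoding, base, f->pc_begin, &pc);
  return pc;
}
#endif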
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

get_cie_encoding (struct dwarf_cie *cie)
  const unsigned char *aug, *p;

  aug = cie->augmentation;

    return DW_EH_PE_absptr;

  /* Skip the augmentation string.  */
  p = aug + strlen ((const char *) aug) + 1;
  p = read_uleb128 (p, &utmp);		/* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);		/* Skip data alignment.  */
  p++;					/* Skip return address column.  */

  aug++;				/* Skip 'z' */
  p = read_uleb128 (p, &utmp);		/* Skip augmentation length.  */

      /* This is what we're looking for.  */

      /* Personality encoding and pointer.  */
      else if (*aug == 'P')

          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);

      else if (*aug == 'L')

      /* Otherwise end of string, or unknown augmentation.  */

  return DW_EH_PE_absptr;

get_fde_encoding (struct dwarf_fde *f)
  return get_cie_encoding (get_cie (f));
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Return the Nth pc_begin value from FDE x.  */

static inline _Unwind_Ptr
get_pc_begin (fde *x, size_t n)
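  /* The memcpy below is a byte-wise copy because the pc_begin bytes inside
     .eh_frame need not be aligned for an _Unwind_Ptr; a direct load could
     fault on strict-alignment targets.  */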
  memcpy (&p, x->pc_begin + n * sizeof (_Unwind_Ptr), sizeof (_Unwind_Ptr));
/* Comparison routines.  Three variants of increasing complexity.  */

fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       fde *x, fde *y)
  _Unwind_Ptr x_ptr = get_pc_begin (x, 0);
  _Unwind_Ptr y_ptr = get_pc_begin (y, 0);
fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);
fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

typedef int (*fde_compare_t) (struct object *, fde *, fde *);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
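
/* A minimal sketch of the same split / sort / merge idea on plain integers,
   assuming a simplified greedy split rather than the longest-sequence chain
   used by fde_split below; all names here are hypothetical example code.  */
#if 0
#include <stdlib.h>

static int
example_cmp_long (const void *a, const void *b)
{
  long x = *(const long *) a, y = *(const long *) b;
  return (x > y) - (x < y);
}

/* Split IN[0..N-1] into a non-decreasing LINEAR sequence and the leftover
   ERRATIC elements, sort the (hopefully small) erratic part, then merge.  */
static void
example_sort (const long *in, size_t n, long *out)
{
  long *linear = malloc (n * sizeof *linear);
  long *erratic = malloc (n * sizeof *erratic);
  size_t nl = 0, ne = 0, i, j, k;

  for (i = 0; i < n; ++i)
    {
      /* Greedy split: keep an element in the linear part only if it does
         not break the non-decreasing order seen so far.  */
      if (nl == 0 || in[i] >= linear[nl - 1])
        linear[nl++] = in[i];
      else
        erratic[ne++] = in[i];
    }

  qsort (erratic, ne, sizeof *erratic, example_cmp_long);

  /* Standard two-way merge of the two sorted sequences.  */
  for (i = j = k = 0; i < nl && j < ne; )
    out[k++] = linear[i] <= erratic[j] ? linear[i++] : erratic[j++];
  while (i < nl)
    out[k++] = linear[i++];
  while (j < ne)
    out[k++] = erratic[j++];

  free (linear);
  free (erratic);
}
#endif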
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};

start_fde_sort (struct fde_accumulator *accu, size_t count)

  size = sizeof (struct fde_vector) + sizeof (fde *) * count;
  if ((accu->linear = (struct fde_vector *) malloc (size)))

      accu->linear->count = 0;
      if ((accu->erratic = (struct fde_vector *) malloc (size)))
        accu->erratic->count = 0;
fde_insert (struct fde_accumulator *accu, fde *this_fde)

    accu->linear->array[accu->linear->count++] = this_fde;
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   The longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, so we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)

  size_t count = linear->count;
  fde **chain_end = &marker;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  if (sizeof (fde *) != sizeof (fde **))

  for (i = 0; i < count; i++)

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;

          chain_end = (fde **) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;

      erratic->array[i] = (fde *) chain_end;
      chain_end = &linear->array[i];

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)

  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.  */

  fde **a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
#define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
  size_t n = erratic->count;

      /* Invariant: a[m..n-1] is a heap.  */

      for (i = m; 2*i+1 < n; )

              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)

              SWAP (a[i], a[2*i+2]);

          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)

              SWAP (a[i], a[2*i+1]);

      /* Invariant: a[0..n-1] is a heap.  */

      for (i = 0; 2*i+1 < n; )

              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)

              SWAP (a[i], a[2*i+2]);

          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)

              SWAP (a[i], a[2*i+1]);
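
/* A minimal sketch of the same bottom-up heapsort on an array of longs,
   with the control flow that is elided above written out in full; purely
   hypothetical example code, not the routine used by the unwinder.  */
#if 0
#include <stddef.h>

static void
example_heapsort (long *a, size_t n)
{
  size_t i, m;

  /* Phase 1: build a max-heap bottom-up.  After the iteration for a given
     m, the subarray a[m..n-1] satisfies the heap property.  */
  for (m = n / 2; m > 0; )
    {
      --m;
      for (i = m; 2 * i + 1 < n; )
        {
          size_t child = 2 * i + 1;
          if (child + 1 < n && a[child + 1] > a[child])
            child++;			/* Pick the larger child.  */
          if (a[child] <= a[i])
            break;			/* Heap property already holds.  */
          long tmp = a[i]; a[i] = a[child]; a[child] = tmp;
          i = child;
        }
    }

  /* Phase 2: repeatedly move the maximum to the end and restore the heap
     property on the shrinking prefix.  */
  while (n > 1)
    {
      long tmp = a[0]; a[0] = a[n - 1]; a[n - 1] = tmp;
      --n;
      for (i = 0; 2 * i + 1 < n; )
        {
          size_t child = 2 * i + 1;
          if (child + 1 < n && a[child + 1] > a[child])
            child++;
          if (a[child] <= a[i])
            break;
          long tmp2 = a[i]; a[i] = a[child]; a[child] = tmp2;
          i = child;
        }
    }
}
#endif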
/* Merge V1 and V2, both sorted, and put the result into V1.  */

fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)

          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)

              v1->array[i1+i2] = v1->array[i1-1];

          v1->array[i1+i2] = fde2;

      v1->count += v2->count;
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)

  fde_compare_t fde_compare;

  if (accu->linear->count != count)

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      if (accu->linear->count + accu->erratic->count != count)

      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);

      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

classify_object_over_fdes (struct object *ob, fde *this_fde)

  struct dwarf_cie *last_cie = 0;

  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))

      struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      if (this_fde->CIE_delta == 0)

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)

          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
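      /* For example, with a 4-byte DW_EH_PE_udata4 encoding on an LP64
         target, size_of_encoded_value returns 4 and the mask below becomes
         (1L << 32) - 1 == 0xffffffff, so only the 32 representable bits of
         pc_begin are tested against zero.  */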
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;

      if ((pc_begin & mask) == 0)

      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)

  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))

      struct dwarf_cie *this_cie;

      if (this_fde->CIE_delta == 0)

      if (ob->s.b.mixed_encoding)

          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)

              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);

      if (encoding == DW_EH_PE_absptr)

          if (get_pc_begin (this_fde, 0) == 0)

          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;

          if ((pc_begin & mask) == 0)

      fde_insert (accu, this_fde);
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

init_object (struct object * ob)

  struct fde_accumulator accu;

  count = ob->s.b.count;

      if (ob->s.b.from_array)

          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);

        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)

  if (!start_fde_sort (&accu, count))

  if (ob->s.b.from_array)

      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);

    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))

      struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      if (this_fde->CIE_delta == 0)

      if (ob->s.b.mixed_encoding)

          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)

              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);

      if (encoding == DW_EH_PE_absptr)

          pc_begin = get_pc_begin (this_fde, 0);
          pc_range = get_pc_begin (this_fde, 1);

          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;

          if ((pc_begin & mask) == 0)

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

binary_search_unencoded_fdes (struct object *ob, void *pc)

  struct fde_vector *vec = ob->u.sort;

  for (lo = 0, hi = vec->count; lo < hi; )

      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];

      pc_begin = (void *) get_pc_begin (f, 0);
      pc_range = (uaddr) get_pc_begin (f, 1);

      else if (pc >= pc_begin + pc_range)
binary_search_single_encoding_fdes (struct object *ob, void *pc)

  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);

  for (lo = 0, hi = vec->count; lo < hi; )

      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,

      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)

      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)

  struct fde_vector *vec = ob->u.sort;

  for (lo = 0, hi = vec->count; lo < hi; )

      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)

      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
search_object (struct object * ob, void *pc)

  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)

      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
      /* Long, slow, laborious linear search, because we have no memory.  */
      if (ob->s.b.from_array)

          for (p = ob->u.array; *p; p++)

              fde *f = linear_search_fdes (ob, *p, pc);

        return linear_search_fdes (ob, ob->u.single, pc);
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)

        f = search_object (ob, pc);

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)

  __gthread_mutex_unlock (&object_mutex);

  bases->tbase = ob->tbase;
  bases->dbase = ob->dbase;

  encoding = ob->s.b.encoding;
  if (ob->s.b.mixed_encoding)
    encoding = get_fde_encoding (f);
  read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                f->pc_begin, &func);
  bases->func = (void *) func;
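
/* A minimal usage sketch, assuming a hypothetical backtrace helper: it maps
   a code address to the start of the containing function via the bases
   filled in above.  The helper name is an assumption for illustration.  */
#if 0
static void *
example_function_start_for_pc (void *pc)
{
  struct dwarf_eh_bases bases;
  const fde *f = _Unwind_Find_FDE (pc, &bases);

  if (f == NULL)
    return NULL;	/* No registered unwind info covers this address.  */
  return bases.func;	/* Start of the function containing PC.  */
}
#endif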