/* Subroutines needed for unwinding stack frames for exception handling.  */
/* Copyright (C) 1997-2014 Free Software Foundation, Inc.
   Contributed by Jason Merrill <jason@cygnus.com>.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
# include <shlib-compat.h>

#if !defined _LIBC || SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_2_5)

#include <bits/libc-lock.h>

#define NO_BASE_OF_ENCODED_VALUE
#include <unwind-pe.h>
#include <unwind-dw2-fde.h>

#ifndef _Unwind_Find_FDE
#define NO_BASE_OF_ENCODED_VALUE
#include "unwind-pe.h"
#include "unwind-dw2-fde.h"
#endif
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;
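
/* A rough sketch of the struct object fields used below (the
   authoritative declaration lives in unwind-dw2-fde.h): pc_begin, tbase
   and dbase are base addresses; the union u holds either the raw
   .eh_frame pointer (u.single), a NULL-terminated table of such pointers
   (u.array) or the sorted FDE vector (u.sort); and the bitfield s.b
   carries the sorted/from_array/mixed_encoding flags, the common pointer
   encoding and the FDE count.  */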
__libc_lock_define_initialized (static, object_mutex)
#define init_object_mutex_once()
#define __gthread_mutex_lock(m) __libc_lock_lock (*(m))
#define __gthread_mutex_unlock(m) __libc_lock_unlock (*(m))
void __register_frame_info_bases (void *begin, struct object *ob,
                                  void *tbase, void *dbase);
hidden_proto (__register_frame_info_bases)
void __register_frame_info_table_bases (void *begin, struct object *ob,
                                        void *tbase, void *dbase);
hidden_proto (__register_frame_info_table_bases)
void *__deregister_frame_info_bases (void *begin);
hidden_proto (__deregister_frame_info_bases)
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif

#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
#define init_object_mutex_once()
#endif
/* Called from crtbegin.o to register the unwind info for an object.  */

void
__register_frame_info_bases (void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
hidden_def (__register_frame_info_bases)
void
__register_frame_info (void *begin, struct object *ob)
{
  __register_frame_info_bases (begin, ob, 0, 0);
}
void
__register_frame (void *begin)
{
  struct object *ob;

  /* If .eh_frame is empty, don't register at all.  */
  if (*(uword *) begin == 0)
    return;

  ob = (struct object *) malloc (sizeof (struct object));
  __register_frame_info_bases (begin, ob, 0, 0);
}
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */
void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
hidden_def (__register_frame_info_table_bases)
void
__register_frame_info_table (void *begin, struct object *ob)
{
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
void
__register_frame_table (void *begin)
{
  struct object *ob = (struct object *) malloc (sizeof (struct object));
  __register_frame_info_table_bases (begin, ob, 0, 0);
}
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll abort.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */
void *
__deregister_frame_info_bases (void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  for (p = &unseen_objects; *p; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  for (p = &seen_objects; *p; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

  __gthread_mutex_unlock (&object_mutex);
  abort ();

 out:
  __gthread_mutex_unlock (&object_mutex);
  return (void *) ob;
}
hidden_def (__deregister_frame_info_bases)
void *
__deregister_frame_info (void *begin)
{
  return __deregister_frame_info_bases (begin);
}
void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info_bases (begin));
}
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      abort ();
    }
}
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _Unwind_Word utmp;
  _Unwind_Sword stmp;

  aug = cie->augmentation;
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  /* Skip the augmentation string.  */
  p = aug + strlen ((const char *) aug) + 1;
  p = read_uleb128 (p, &utmp);    /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);    /* Skip data alignment.  */
  p++;                            /* Skip return address column.  */

  aug++;                          /* Skip 'z' */
  p = read_uleb128 (p, &utmp);    /* Skip augmentation length.  */

  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}
static int
get_fde_encoding (struct dwarf_fde *f)
{
  return get_cie_encoding (get_cie (f));
}
/* Sorting an array of FDEs by address.
   (Ideally we would have the linker sort the FDEs so we don't have to do
   it at run time.  But the linkers are not yet prepared for this.)  */

/* Return the Nth pc_begin value from FDE x.  */

static inline _Unwind_Ptr
get_pc_begin (fde *x, size_t n)
{
  _Unwind_Ptr p;
  memcpy (&p, x->pc_begin + n * sizeof (_Unwind_Ptr), sizeof (_Unwind_Ptr));
  return p;
}
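
/* Note: the pc_begin bytes inside an FDE are not guaranteed to be
   suitably aligned for an _Unwind_Ptr load, which is why the value is
   copied out with memcpy rather than read through a pointer cast.  */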
/* Comparison routines.  Three variants of increasing complexity.  */

static int
fde_unencoded_compare (struct object *ob __attribute__((unused)),
                       fde *x, fde *y)
{
  _Unwind_Ptr x_ptr = get_pc_begin (x, 0);
  _Unwind_Ptr y_ptr = get_pc_begin (y, 0);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
static int
fde_single_encoding_compare (struct object *ob, fde *x, fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
static int
fde_mixed_encoding_compare (struct object *ob, fde *x, fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
typedef int (*fde_compare_t) (struct object *, fde *, fde *);
/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */
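
/* In outline (see end_fde_sort below): the mostly-sorted input is first
   split into a long already-ordered "linear" part and a short unordered
   "erratic" part, the erratic part is heap sorted, and the two sorted
   vectors are then merged back together.  */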
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;

  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (fde *) * count;
  if ((accu->linear = (struct fde_vector *) malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = (struct fde_vector *) malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}
static inline void
fde_insert (struct fde_accumulator *accu, fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries, we construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */
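
/* For example (illustration only): given the input 1 2 9 3 4, the longest
   usable increasing run is 1 2 3 4, so that sequence stays in LINEAR and
   the single out-of-place entry 9 is moved to ERRATIC.  */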
static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  static fde *marker;
  size_t count = linear->count;
  fde **chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  if (sizeof (fde *) != sizeof (fde **))
    abort ();

  for (i = 0; i < count; i++)
    {
      fde **probe;

      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (fde **) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      erratic->array[i] = (fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.  */
  fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
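  /* Illustration only: the array {9, 7, 5, 1, 6} is such a heap, since
     a[0]=9 >= a[1]=7, a[0]=9 >= a[2]=5, a[1]=7 >= a[3]=1 and
     a[1]=7 >= a[4]=6.  */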
#define SWAP(x,y) do { fde * tmp = x; x = y; y = tmp; } while (0)
  size_t n = erratic->count;
  size_t m = n;
  size_t i;

  while (m > 0)
    {
      /* Invariant: a[m..n-1] is a heap.  */
      m--;
      for (i = m; 2*i+1 < n; )
        {
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+2]);
              i = 2*i+2;
            }
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+1]);
              i = 2*i+1;
            }
          else
            break;
        }
    }

  while (n > 1)
    {
      /* Invariant: a[0..n-1] is a heap.  */
      n--;
      SWAP (a[0], a[n]);
      for (i = 0; 2*i+1 < n; )
        {
          if (2*i+2 < n
              && fde_compare (ob, a[2*i+2], a[2*i+1]) > 0
              && fde_compare (ob, a[2*i+2], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+2]);
              i = 2*i+2;
            }
          else if (fde_compare (ob, a[2*i+1], a[i]) > 0)
            {
              SWAP (a[i], a[2*i+1]);
              i = 2*i+1;
            }
          else
            break;
        }
    }
#undef SWAP
}
/* Merge V1 and V2, both sorted, and put the result into V1.  */
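/* In outline: the loop below walks V2 from its last (largest) entry
   downwards; for each one it shifts the still-larger tail of V1 up by the
   number of V2 entries not yet placed and drops the V2 entry into the gap,
   i.e. an insertion-style merge done in place from the back.  */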
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  fde *fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}
static void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  if (accu->linear->count != count)
    abort ();

  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic != NULL)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      if (accu->linear->count + accu->erratic->count != count)
        abort ();
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (1L << (mask << 3)) - 1;
      else
        mask = -1;
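      /* Illustration only: a 2-byte encoding gives
         size_of_encoded_value () == 2, so mask becomes
         (1L << 16) - 1 == 0xffff and only the bits such an encoding can
         actually represent are tested against zero below.  */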
      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
static void
add_fdes (struct object *ob, struct fde_accumulator *accu, fde *this_fde)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          if (get_pc_begin (this_fde, 0) == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  */

static fde *
linear_search_fdes (struct object *ob, fde *this_fde, void *pc)
{
  struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          pc_begin = get_pc_begin (this_fde, 0);
          pc_range = get_pc_begin (this_fde, 1);
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (1L << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */
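
/* In all three variants the vector is sorted by increasing pc_begin and
   [lo, hi) is the half-open index range still under consideration, so the
   loop terminates with NULL once lo meets hi without a match.  */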
static fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;

      pc_begin = (void *) get_pc_begin (f, 0);
      pc_range = (uaddr) get_pc_begin (f, 1);

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
static fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p; p++)
            {
              fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}
fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list.  */
      for (p = &seen_objects; *p; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}