1 /* ARM EABI compliant unwinding routines.
2 Copyright (C) 2004, 2005 Free Software Foundation, Inc.
3 Contributed by Paul Brook
5 This file is free software; you can redistribute it and/or modify it
6 under the terms of the GNU General Public License as published by the
7 Free Software Foundation; either version 2, or (at your option) any later version.
10 In addition to the permissions in the GNU General Public License, the
11 Free Software Foundation gives you unlimited permission to link the
12 compiled version of this file into combinations with other programs,
13 and to distribute those combinations without any restriction coming
14 from the use of this file. (The General Public License restrictions
15 do apply in other respects; for example, they cover modification of
16 the file, and distribution when not linked into a combined executable.
19 This file is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA. */
30 /* We add a prototype for abort here to avoid creating a dependency on target headers. */
32 extern void abort (void);
34 /* Definitions for C++ runtime support routines. We make these weak
35 declarations to avoid pulling in libsupc++ unnecessarily. */
36 typedef unsigned char bool;
38 typedef struct _ZSt9type_info type_info
; /* This names C++ type_info type */
40 void __attribute__((weak
)) __cxa_call_unexpected(_Unwind_Control_Block
*ucbp
);
41 bool __attribute__((weak
)) __cxa_begin_cleanup(_Unwind_Control_Block
*ucbp
);
42 bool __attribute__((weak
)) __cxa_type_match(_Unwind_Control_Block
*ucbp
,
43 const type_info
*rttip
,
45 void **matched_object
);
47 _Unwind_Ptr
__attribute__((weak
))
48 __gnu_Unwind_Find_exidx (_Unwind_Ptr
, int *);
56 #define EXIDX_CANTUNWIND 1
57 #define uint32_highbit (((_uw) 1) << 31)
59 #define UCB_FORCED_STOP_FN(ucbp) ((ucbp)->unwinder_cache.reserved1)
60 #define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2)
61 #define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3)
62 #define UCB_FORCED_STOP_ARG(ucbp) ((ucbp)->unwinder_cache.reserved4)
69 /* We use normal integer types here to avoid the compiler generating
70 coprocessor instructions. */
79 /* Always populated via VSTM, so no need for the "pad" field from
80 vfp_regs (which is used to store the format word for FSTMX). */
94 /* Unwind descriptors. */
108 /* The ABI specifies that the unwind routines may only use core registers,
109 except when actually manipulating coprocessor state. This allows
110 us to write one implementation that works on all platforms by
111 demand-saving coprocessor registers.
113 During unwinding we hold the coprocessor state in the actual hardware
114 registers and allocate demand-save areas for use during phase1
119 /* The first fields must be the same as a phase2_vrs. */
120 _uw demand_save_flags
;
121 struct core_regs core
;
122 _uw prev_sp
; /* Only valid during forced unwinding. */
124 struct vfpv3_regs vfp_regs_16_to_31
;
128 #define DEMAND_SAVE_VFP 1 /* VFP state has been saved if not set */
129 #define DEMAND_SAVE_VFP_D 2 /* VFP state is for FLDMD/FSTMD if set */
130 #define DEMAND_SAVE_VFP_V3 4 /* VFPv3 state for regs 16 .. 31 has
131 been saved if not set */
133 /* This must match the structure created by the assembly wrappers. */
136 _uw demand_save_flags
;
137 struct core_regs core
;
141 /* An exception index table entry. */
143 typedef struct __EIT_entry
149 /* Assembly helper functions. */
151 /* Restore core register state. Never returns. */
152 void __attribute__((noreturn
)) restore_core_regs (struct core_regs
*);
155 /* Coprocessor register state manipulation functions. */
157 /* Routines for FLDMX/FSTMX format... */
158 void __gnu_Unwind_Save_VFP (struct vfp_regs
* p
);
159 void __gnu_Unwind_Restore_VFP (struct vfp_regs
* p
);
161 /* ...and those for FLDMD/FSTMD format... */
162 void __gnu_Unwind_Save_VFP_D (struct vfp_regs
* p
);
163 void __gnu_Unwind_Restore_VFP_D (struct vfp_regs
* p
);
165 /* ...and those for VLDM/VSTM format, saving/restoring only registers
167 void __gnu_Unwind_Save_VFP_D_16_to_31 (struct vfpv3_regs
* p
);
168 void __gnu_Unwind_Restore_VFP_D_16_to_31 (struct vfpv3_regs
* p
);
170 /* Restore coprocessor state after phase1 unwinding. */
/* NOTE(review): this extract is truncated -- the return-type line, the
   braces and the `else` between the FLDMD and FLDMX restore calls are
   missing from this view; the visible lines are not contiguous.  */
172 restore_non_core_regs (phase1_vrs
* vrs
)
/* VFP state was demand-saved iff DEMAND_SAVE_VFP is clear.  */
174 if ((vrs
->demand_save_flags
& DEMAND_SAVE_VFP
) == 0)
/* DEMAND_SAVE_VFP_D selects FLDMD/FSTMD format over FLDMX/FSTMX.  */
176 if (vrs
->demand_save_flags
& DEMAND_SAVE_VFP_D
)
177 __gnu_Unwind_Restore_VFP_D (&vrs
->vfp
);
179 __gnu_Unwind_Restore_VFP (&vrs
->vfp
);
/* VFPv3 upper registers (d16-d31) are restored separately.  */
182 if ((vrs
->demand_save_flags
& DEMAND_SAVE_VFP_V3
) == 0)
183 __gnu_Unwind_Restore_VFP_D_16_to_31 (&vrs
->vfp_regs_16_to_31
);
186 /* A better way to do this would probably be to compare the absolute address
187 with a segment relative relocation of the same symbol. */
189 extern int __text_start
;
190 extern int __data_start
;
192 /* The exception index table location. */
193 extern __EIT_entry __exidx_start
;
194 extern __EIT_entry __exidx_end
;
196 /* ABI defined personality routines. */
197 extern _Unwind_Reason_Code
__aeabi_unwind_cpp_pr0 (_Unwind_State
,
198 _Unwind_Control_Block
*, _Unwind_Context
*);// __attribute__((weak));
199 extern _Unwind_Reason_Code
__aeabi_unwind_cpp_pr1 (_Unwind_State
,
200 _Unwind_Control_Block
*, _Unwind_Context
*) __attribute__((weak
));
201 extern _Unwind_Reason_Code
__aeabi_unwind_cpp_pr2 (_Unwind_State
,
202 _Unwind_Control_Block
*, _Unwind_Context
*) __attribute__((weak
));
204 /* ABI defined routine to store a virtual register to memory. */
/* NOTE(review): truncated extract -- the regno/valuep parameter lines,
   the switch over REGCLASS and several braces are missing from this
   view.  Visible behavior: only _UVRSD_UINT32 core registers are
   handled; the value is copied out of vrs->core.r[regno].  */
206 _Unwind_VRS_Result
_Unwind_VRS_Get (_Unwind_Context
*context
,
207 _Unwind_VRS_RegClass regclass
,
209 _Unwind_VRS_DataRepresentation representation
,
/* The context is really a phase1_vrs in disguise.  */
212 phase1_vrs
*vrs
= (phase1_vrs
*) context
;
217 if (representation
!= _UVRSD_UINT32
219 return _UVRSR_FAILED
;
220 *(_uw
*) valuep
= vrs
->core
.r
[regno
];
/* Non-core register classes are not implemented here.  */
227 return _UVRSR_NOT_IMPLEMENTED
;
230 return _UVRSR_FAILED
;
235 /* ABI defined function to load a virtual register from memory. */
/* NOTE(review): truncated extract -- the regno/valuep parameter lines
   and the regclass switch are missing from this view.  Mirror of
   _Unwind_VRS_Get: writes *valuep into vrs->core.r[regno] for
   _UVRSD_UINT32 core registers only.  */
237 _Unwind_VRS_Result
_Unwind_VRS_Set (_Unwind_Context
*context
,
238 _Unwind_VRS_RegClass regclass
,
240 _Unwind_VRS_DataRepresentation representation
,
243 phase1_vrs
*vrs
= (phase1_vrs
*) context
;
248 if (representation
!= _UVRSD_UINT32
250 return _UVRSR_FAILED
;
252 vrs
->core
.r
[regno
] = *(_uw
*) valuep
;
259 return _UVRSR_NOT_IMPLEMENTED
;
262 return _UVRSR_FAILED
;
267 /* ABI defined function to pop registers off the stack. */
/* NOTE(review): heavily truncated extract -- the discriminator
   parameter line, the regclass switch labels (_UVRSC_CORE / _UVRSC_VFP
   cases), several loop bodies and braces are missing from this view.
   The visible code shows two paths: (a) popping a 16-bit mask of core
   registers from the stack pointed to by r[R_SP], and (b) demand-saving
   then reloading VFP/VFPv3 double registers via the __gnu_Unwind_*_VFP*
   helpers.  Do not assume the visible lines are contiguous.  */
269 _Unwind_VRS_Result
_Unwind_VRS_Pop (_Unwind_Context
*context
,
270 _Unwind_VRS_RegClass regclass
,
272 _Unwind_VRS_DataRepresentation representation
)
274 phase1_vrs
*vrs
= (phase1_vrs
*) context
;
284 if (representation
!= _UVRSD_UINT32
)
285 return _UVRSR_FAILED
;
/* Low 16 bits of the discriminator select which core regs to pop.  */
287 mask
= discriminator
& 0xffff;
288 ptr
= (_uw
*) vrs
->core
.r
[R_SP
];
289 /* Pop the requested registers. */
290 for (i
= 0; i
< 16; i
++)
293 vrs
->core
.r
[i
] = *(ptr
++);
295 /* Writeback the stack pointer value if it wasn't restored. */
296 if ((mask
& (1 << R_SP
)) == 0)
297 vrs
->core
.r
[R_SP
] = (_uw
) ptr
;
/* VFP path: high halfword is the first register, low halfword the
   count.  */
303 _uw start
= discriminator
>> 16;
304 _uw count
= discriminator
& 0xffff;
306 struct vfpv3_regs tmp_16_to_31
;
310 int num_vfpv3_regs
= 0;
312 /* We use an approximation here by bounding _UVRSD_DOUBLE
313 register numbers at 32 always, since we can't detect if
314 VFPv3 isn't present (in such a case the upper limit is 16). */
315 if ((representation
!= _UVRSD_VFPX
&& representation
!= _UVRSD_DOUBLE
)
316 || start
+ count
> (representation
== _UVRSD_VFPX
? 16 : 32)
317 || (representation
== _UVRSD_VFPX
&& start
>= 16))
318 return _UVRSR_FAILED
;
320 /* Check if we're being asked to pop VFPv3-only registers
321 (numbers 16 through 31). */
323 num_vfpv3_regs
= count
;
324 else if (start
+ count
> 16)
325 num_vfpv3_regs
= start
+ count
- 16;
327 if (num_vfpv3_regs
&& representation
!= _UVRSD_DOUBLE
)
328 return _UVRSR_FAILED
;
330 /* Demand-save coprocessor registers for stage1. */
331 if (start
< 16 && (vrs
->demand_save_flags
& DEMAND_SAVE_VFP
))
333 vrs
->demand_save_flags
&= ~DEMAND_SAVE_VFP
;
335 if (representation
== _UVRSD_DOUBLE
)
337 /* Save in FLDMD/FSTMD format. */
338 vrs
->demand_save_flags
|= DEMAND_SAVE_VFP_D
;
339 __gnu_Unwind_Save_VFP_D (&vrs
->vfp
);
343 /* Save in FLDMX/FSTMX format. */
344 vrs
->demand_save_flags
&= ~DEMAND_SAVE_VFP_D
;
345 __gnu_Unwind_Save_VFP (&vrs
->vfp
);
349 if (num_vfpv3_regs
> 0
350 && (vrs
->demand_save_flags
& DEMAND_SAVE_VFP_V3
))
352 vrs
->demand_save_flags
&= ~DEMAND_SAVE_VFP_V3
;
353 __gnu_Unwind_Save_VFP_D_16_to_31 (&vrs
->vfp_regs_16_to_31
);
356 /* Restore the registers from the stack. Do this by saving the
357 current VFP registers to a memory area, moving the in-memory
358 values into that area, and restoring from the whole area.
359 For _UVRSD_VFPX we assume FSTMX standard format 1. */
360 if (representation
== _UVRSD_VFPX
)
361 __gnu_Unwind_Save_VFP (&tmp
);
364 /* Save registers 0 .. 15 if required. */
366 __gnu_Unwind_Save_VFP_D (&tmp
);
368 /* Save VFPv3 registers 16 .. 31 if required. */
370 __gnu_Unwind_Save_VFP_D_16_to_31 (&tmp_16_to_31
);
373 /* Work out how many registers below register 16 need popping. */
374 tmp_count
= num_vfpv3_regs
> 0 ? 16 - start
: count
;
376 /* Copy registers below 16, if needed.
377 The stack address is only guaranteed to be word aligned, so
378 we can't use doubleword copies. */
379 sp
= (_uw
*) vrs
->core
.r
[R_SP
];
383 dest
= (_uw
*) &tmp
.d
[start
];
388 /* Copy VFPv3 registers numbered >= 16, if needed. */
389 if (num_vfpv3_regs
> 0)
391 /* num_vfpv3_regs is needed below, so copy it. */
392 int tmp_count_2
= num_vfpv3_regs
* 2;
393 int vfpv3_start
= start
< 16 ? 16 : start
;
395 dest
= (_uw
*) &tmp_16_to_31
.d
[vfpv3_start
- 16];
396 while (tmp_count_2
--)
400 /* Skip the format word space if using FLDMX/FSTMX format. */
401 if (representation
== _UVRSD_VFPX
)
404 /* Set the new stack pointer. */
405 vrs
->core
.r
[R_SP
] = (_uw
) sp
;
407 /* Reload the registers. */
408 if (representation
== _UVRSD_VFPX
)
409 __gnu_Unwind_Restore_VFP (&tmp
);
412 /* Restore registers 0 .. 15 if required. */
414 __gnu_Unwind_Restore_VFP_D (&tmp
);
416 /* Restore VFPv3 registers 16 .. 31 if required. */
417 if (num_vfpv3_regs
> 0)
418 __gnu_Unwind_Restore_VFP_D_16_to_31 (&tmp_16_to_31
);
/* Remaining register classes (WMMX etc., per the EHABI) are not
   implemented -- TODO confirm against the missing switch labels.  */
426 return _UVRSR_NOT_IMPLEMENTED
;
429 return _UVRSR_FAILED
;
434 /* Core unwinding functions. */
436 /* Calculate the address encoded by a 31-bit self-relative offset at address
/* NOTE(review): truncated extract -- the return-type line and the
   initial `offset = *p` load are missing from this view.  */
439 selfrel_offset31 (const _uw
*p
)
444 /* Sign extend to 32 bits. */
445 if (offset
& (1 << 30))
448 offset
&= ~(1u << 31);
/* Result is relative to the location of the offset word itself.  */
450 return offset
+ (_uw
) p
;
454 /* Perform a binary search for RETURN_ADDRESS in TABLE. The table contains
/* NOTE(review): truncated extract -- the loop header, the left/right
   variable declarations and the range-narrowing branches are missing
   from this view.  Returns NULL when the address is not covered.  */
457 static const __EIT_entry
*
458 search_EIT_table (const __EIT_entry
* table
, int nrec
, _uw return_address
)
465 return (__EIT_entry
*) 0;
/* Classic binary search; this_fn/next_fn bound the function that
   entry n describes.  */
472 n
= (left
+ right
) / 2;
473 this_fn
= selfrel_offset31 (&table
[n
].fnoffset
);
475 next_fn
= selfrel_offset31 (&table
[n
+ 1].fnoffset
) - 1;
/* Last entry: treat the upper bound as the top of the address space.  */
477 next_fn
= (_uw
)0 - 1;
479 if (return_address
< this_fn
)
482 return (__EIT_entry
*) 0;
485 else if (return_address
<= next_fn
)
492 /* Find the exception index table entry for the given address.
493 Fill in the relevant fields of the UCB.
494 Returns _URC_FAILURE if an error occurred, _URC_OK on success. */
/* NOTE(review): truncated extract -- the nrec declaration, several
   braces, the `else` branches and the failure returns around the
   search are missing from this view.  */
496 static _Unwind_Reason_Code
497 get_eit_entry (_Unwind_Control_Block
*ucbp
, _uw return_address
)
499 const __EIT_entry
* eitp
;
502 /* The return address is the address of the instruction following the
503 call instruction (plus one in thumb mode). If this was the last
504 instruction in the function the address will lie in the following
505 function. Subtract 2 from the address so that it points within the call
506 instruction itself. */
/* Prefer the (weak) dynamic-linker hook when it is present.  */
509 if (__gnu_Unwind_Find_exidx
)
511 eitp
= (const __EIT_entry
*) __gnu_Unwind_Find_exidx (return_address
,
515 UCB_PR_ADDR (ucbp
) = 0;
/* Statically linked case: use the linker-provided section bounds.  */
521 eitp
= &__exidx_start
;
522 nrec
= &__exidx_end
- &__exidx_start
;
525 eitp
= search_EIT_table (eitp
, nrec
, return_address
);
529 UCB_PR_ADDR (ucbp
) = 0;
532 ucbp
->pr_cache
.fnstart
= selfrel_offset31 (&eitp
->fnoffset
);
534 /* Can this frame be unwound at all? */
535 if (eitp
->content
== EXIDX_CANTUNWIND
)
537 UCB_PR_ADDR (ucbp
) = 0;
538 return _URC_END_OF_STACK
;
541 /* Obtain the address of the "real" __EHT_Header word. */
543 if (eitp
->content
& uint32_highbit
)
545 /* It is immediate data. */
546 ucbp
->pr_cache
.ehtp
= (_Unwind_EHT_Header
*)&eitp
->content
;
547 ucbp
->pr_cache
.additional
= 1;
551 /* The low 31 bits of the content field are a self-relative
552 offset to an _Unwind_EHT_Entry structure. */
553 ucbp
->pr_cache
.ehtp
=
554 (_Unwind_EHT_Header
*) selfrel_offset31 (&eitp
->content
);
555 ucbp
->pr_cache
.additional
= 0;
558 /* Discover the personality routine address. */
559 if (*ucbp
->pr_cache
.ehtp
& (1u << 31))
561 /* One of the predefined standard routines. */
562 _uw idx
= (*(_uw
*) ucbp
->pr_cache
.ehtp
>> 24) & 0xf;
/* idx selects pr0/pr1/pr2; the switch/if lines are missing here.  */
564 UCB_PR_ADDR (ucbp
) = (_uw
) &__aeabi_unwind_cpp_pr0
;
566 UCB_PR_ADDR (ucbp
) = (_uw
) &__aeabi_unwind_cpp_pr1
;
568 UCB_PR_ADDR (ucbp
) = (_uw
) &__aeabi_unwind_cpp_pr2
;
571 UCB_PR_ADDR (ucbp
) = 0;
577 /* Execute region offset to PR */
578 UCB_PR_ADDR (ucbp
) = selfrel_offset31 (ucbp
->pr_cache
.ehtp
);
584 /* Perform phase2 unwinding. VRS is the initial virtual register state. */
/* NOTE(review): truncated extract -- the `do` that pairs with the
   `while` below, the abort() on failure paths and closing braces are
   missing from this view.  */
586 static void __attribute__((noreturn
))
587 unwind_phase2 (_Unwind_Control_Block
* ucbp
, phase2_vrs
* vrs
)
589 _Unwind_Reason_Code pr_result
;
593 /* Find the entry for this routine. */
594 if (get_eit_entry (ucbp
, vrs
->core
.r
[R_PC
]) != _URC_OK
)
/* Remember the call-site pc so _Unwind_Resume can pick up here.  */
597 UCB_SAVED_CALLSITE_ADDR (ucbp
) = vrs
->core
.r
[R_PC
];
599 /* Call the pr to decide what to do. */
600 pr_result
= ((personality_routine
) UCB_PR_ADDR (ucbp
))
601 (_US_UNWIND_FRAME_STARTING
, ucbp
, (_Unwind_Context
*) vrs
);
603 while (pr_result
== _URC_CONTINUE_UNWIND
);
605 if (pr_result
!= _URC_INSTALL_CONTEXT
)
/* Noreturn: transfers control by reloading the core registers.  */
608 restore_core_regs (&vrs
->core
);
611 /* Perform phase2 forced unwinding. */
/* NOTE(review): truncated extract -- the third parameter (a resume
   flag, judging by the callers passing 0/1), the `do`, several braces
   and some failure-path statements are missing from this view.  */
613 static _Unwind_Reason_Code
614 unwind_phase2_forced (_Unwind_Control_Block
*ucbp
, phase2_vrs
*entry_vrs
,
/* The stop function and its argument were stashed in the UCB by
   __gnu_Unwind_ForcedUnwind.  */
617 _Unwind_Stop_Fn stop_fn
= (_Unwind_Stop_Fn
) UCB_FORCED_STOP_FN (ucbp
);
618 void *stop_arg
= (void *)UCB_FORCED_STOP_ARG (ucbp
);
619 _Unwind_Reason_Code pr_result
= 0;
620 /* We use phase1_vrs here even though we do not demand save, for the
622 phase1_vrs saved_vrs
, next_vrs
;
624 /* Save the core registers. */
625 saved_vrs
.core
= entry_vrs
->core
;
626 /* We don't need to demand-save the non-core registers, because we
627 unwind in a single pass. */
628 saved_vrs
.demand_save_flags
= 0;
630 /* Unwind until we reach a propagation barrier. */
633 _Unwind_State action
;
634 _Unwind_Reason_Code entry_code
;
635 _Unwind_Reason_Code stop_code
;
637 /* Find the entry for this routine. */
638 entry_code
= get_eit_entry (ucbp
, saved_vrs
.core
.r
[R_PC
]);
642 action
= _US_UNWIND_FRAME_RESUME
| _US_FORCE_UNWIND
;
646 action
= _US_UNWIND_FRAME_STARTING
| _US_FORCE_UNWIND
;
648 if (entry_code
== _URC_OK
)
650 UCB_SAVED_CALLSITE_ADDR (ucbp
) = saved_vrs
.core
.r
[R_PC
];
/* Work on a copy so the stop function still sees the pre-unwind
   state in saved_vrs.  */
652 next_vrs
= saved_vrs
;
654 /* Call the pr to decide what to do. */
655 pr_result
= ((personality_routine
) UCB_PR_ADDR (ucbp
))
656 (action
, ucbp
, (void *) &next_vrs
);
658 saved_vrs
.prev_sp
= next_vrs
.core
.r
[R_SP
];
662 /* Treat any failure as the end of unwinding, to cope more
663 gracefully with missing EH information. Mixed EH and
664 non-EH within one object will usually result in failure,
665 because the .ARM.exidx tables do not indicate the end
666 of the code to which they apply; but mixed EH and non-EH
667 shared objects should return an unwind failure at the
668 entry of a non-EH shared object. */
669 action
|= _US_END_OF_STACK
;
671 saved_vrs
.prev_sp
= saved_vrs
.core
.r
[R_SP
];
674 stop_code
= stop_fn (1, action
, ucbp
->exception_class
, ucbp
,
675 (void *)&saved_vrs
, stop_arg
);
676 if (stop_code
!= _URC_NO_REASON
)
679 if (entry_code
!= _URC_OK
)
682 saved_vrs
= next_vrs
;
684 while (pr_result
== _URC_CONTINUE_UNWIND
);
686 if (pr_result
!= _URC_INSTALL_CONTEXT
)
688 /* Some sort of failure has occurred in the pr and probably the
689 pr returned _URC_FAILURE. */
/* Noreturn transfer into the installed context.  */
693 restore_core_regs (&saved_vrs
.core
);
696 /* This is a very limited implementation of _Unwind_GetCFA. It returns
697 the stack pointer as it is about to be unwound, and is only valid
698 while calling the stop function during forced unwinding. If the
699 current personality routine result is going to run a cleanup, this
700 will not be the CFA; but when the frame is really unwound, it will
/* NOTE(review): truncated extract -- the return-type line and braces
   are missing from this view.  */
704 _Unwind_GetCFA (_Unwind_Context
*context
)
/* prev_sp is only maintained by unwind_phase2_forced (see above).  */
706 return ((phase1_vrs
*) context
)->prev_sp
;
709 /* Perform phase1 unwinding. UCBP is the exception being thrown, and
710 entry_VRS is the register state on entry to _Unwind_RaiseException. */
/* NOTE(review): truncated extract -- the return-type lines, the `do`,
   the failure `return` statements and closing braces are missing from
   this view.  */
713 __gnu_Unwind_RaiseException (_Unwind_Control_Block
*, phase2_vrs
*);
716 __gnu_Unwind_RaiseException (_Unwind_Control_Block
* ucbp
,
717 phase2_vrs
* entry_vrs
)
719 phase1_vrs saved_vrs
;
720 _Unwind_Reason_Code pr_result
;
722 /* Set the pc to the call site. */
723 entry_vrs
->core
.r
[R_PC
] = entry_vrs
->core
.r
[R_LR
];
725 /* Save the core registers. */
726 saved_vrs
.core
= entry_vrs
->core
;
727 /* Set demand-save flags. */
728 saved_vrs
.demand_save_flags
= ~(_uw
) 0;
730 /* Unwind until we reach a propagation barrier. */
733 /* Find the entry for this routine. */
734 if (get_eit_entry (ucbp
, saved_vrs
.core
.r
[R_PC
]) != _URC_OK
)
737 /* Call the pr to decide what to do. */
738 pr_result
= ((personality_routine
) UCB_PR_ADDR (ucbp
))
739 (_US_VIRTUAL_UNWIND_FRAME
, ucbp
, (void *) &saved_vrs
);
741 while (pr_result
== _URC_CONTINUE_UNWIND
);
743 /* We've unwound as far as we want to go, so restore the original
745 restore_non_core_regs (&saved_vrs
);
746 if (pr_result
!= _URC_HANDLER_FOUND
)
748 /* Some sort of failure has occurred in the pr and probably the
749 pr returned _URC_FAILURE. */
/* Phase 1 found a handler; run phase 2 to actually unwind.  */
753 unwind_phase2 (ucbp
, entry_vrs
);
756 /* Resume unwinding after a cleanup has been run. UCBP is the exception
757 being thrown and ENTRY_VRS is the register state on entry to
/* NOTE(review): the comment above appears to belong to a different
   routine (__gnu_Unwind_Resume) and was left adjacent by truncation;
   the lines below are __gnu_Unwind_ForcedUnwind, which stashes the
   stop function/argument in the UCB and runs single-pass forced
   unwinding.  Return-type lines are missing from this view.  */
760 __gnu_Unwind_ForcedUnwind (_Unwind_Control_Block
*,
761 _Unwind_Stop_Fn
, void *, phase2_vrs
*);
764 __gnu_Unwind_ForcedUnwind (_Unwind_Control_Block
*ucbp
,
765 _Unwind_Stop_Fn stop_fn
, void *stop_arg
,
766 phase2_vrs
*entry_vrs
)
768 UCB_FORCED_STOP_FN (ucbp
) = (_uw
) stop_fn
;
769 UCB_FORCED_STOP_ARG (ucbp
) = (_uw
) stop_arg
;
771 /* Set the pc to the call site. */
772 entry_vrs
->core
.r
[R_PC
] = entry_vrs
->core
.r
[R_LR
];
774 return unwind_phase2_forced (ucbp
, entry_vrs
, 0);
/* NOTE(review): truncated extract -- return-type lines, the abort()
   calls after the noreturn paths, the `switch` header and `default`
   case are missing from this view.  Resumes phase-2 unwinding after a
   landing-pad cleanup has run.  */
778 __gnu_Unwind_Resume (_Unwind_Control_Block
*, phase2_vrs
*);
781 __gnu_Unwind_Resume (_Unwind_Control_Block
* ucbp
, phase2_vrs
* entry_vrs
)
783 _Unwind_Reason_Code pr_result
;
785 /* Recover the saved address. */
786 entry_vrs
->core
.r
[R_PC
] = UCB_SAVED_CALLSITE_ADDR (ucbp
);
/* A non-zero stop fn means this exception is being force-unwound.  */
788 if (UCB_FORCED_STOP_FN (ucbp
))
790 unwind_phase2_forced (ucbp
, entry_vrs
, 1);
792 /* We can't return failure at this point. */
796 /* Call the cached PR. */
797 pr_result
= ((personality_routine
) UCB_PR_ADDR (ucbp
))
798 (_US_UNWIND_FRAME_RESUME
, ucbp
, (_Unwind_Context
*) entry_vrs
);
802 case _URC_INSTALL_CONTEXT
:
803 /* Upload the registers to enter the landing pad. */
804 restore_core_regs (&entry_vrs
->core
);
806 case _URC_CONTINUE_UNWIND
:
807 /* Continue unwinding the next frame. */
808 unwind_phase2 (ucbp
, entry_vrs
);
/* NOTE(review): truncated extract -- return-type lines and braces are
   missing from this view.  Rethrow entry point: re-runs the two-phase
   search for normal exceptions, or continues single-pass forced
   unwinding when a stop function is installed.  */
816 __gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block
*, phase2_vrs
*);
819 __gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block
* ucbp
,
820 phase2_vrs
* entry_vrs
)
822 if (!UCB_FORCED_STOP_FN (ucbp
))
823 return __gnu_Unwind_RaiseException (ucbp
, entry_vrs
);
825 /* Set the pc to the call site. */
826 entry_vrs
->core
.r
[R_PC
] = entry_vrs
->core
.r
[R_LR
];
827 /* Continue unwinding the next frame. */
828 return unwind_phase2_forced (ucbp
, entry_vrs
, 0);
831 /* Clean up an exception object when unwinding is complete. */
/* NOTE(review): the (empty, per the unused parameter) body and the
   return-type line are missing from this view.  */
833 _Unwind_Complete (_Unwind_Control_Block
* ucbp
__attribute__((unused
)))
838 /* Get the _Unwind_Control_Block from an _Unwind_Context. */
/* The UCB pointer is kept in the ip (r12) virtual register during
   unwinding; braces are missing from this extract.  */
840 static inline _Unwind_Control_Block
*
841 unwind_UCB_from_context (_Unwind_Context
* context
)
843 return (_Unwind_Control_Block
*) _Unwind_GetGR (context
, R_IP
);
847 /* Free an exception. */
/* NOTE(review): the return-type line and braces are missing from this
   view.  Invokes the language-supplied cleanup callback, if any.  */
850 _Unwind_DeleteException (_Unwind_Exception
* exc
)
852 if (exc
->exception_cleanup
)
853 (*exc
->exception_cleanup
) (_URC_FOREIGN_EXCEPTION_CAUGHT
, exc
);
857 /* Common implementation for ARM ABI defined personality routines.
858 ID is the index of the personality routine, other arguments are as defined
859 by __aeabi_unwind_cpp_pr{0,1,2}. */
/* NOTE(review): heavily truncated extract -- the `id` parameter line,
   many local declarations (data, len, offset, lp, rtti, matched, ...),
   loop headers, switch cases, `else` branches and braces are missing
   from this view; the visible lines are far from contiguous.  The
   visible logic: decode the unwind-table header, then (for pr1/pr2
   style tables) walk the cleanup / catch / exception-specification
   descriptors, matching barriers in phase 1 and installing landing-pad
   contexts in phase 2; finally run the unwind instructions via
   __gnu_unwind_execute.  */
861 static _Unwind_Reason_Code
862 __gnu_unwind_pr_common (_Unwind_State state
,
863 _Unwind_Control_Block
*ucbp
,
864 _Unwind_Context
*context
,
867 __gnu_unwind_state uws
;
872 int phase2_call_unexpected_after_unwind
= 0;
/* Split the force-unwind bit from the phase/action bits.  */
874 int forced_unwind
= state
& _US_FORCE_UNWIND
;
876 state
&= _US_ACTION_MASK
;
878 data
= (_uw
*) ucbp
->pr_cache
.ehtp
;
879 uws
.data
= *(data
++);
889 uws
.words_left
= (uws
.data
>> 16) & 0xff;
/* Skip the unwind opcodes to reach the descriptor list.  */
892 data
+= uws
.words_left
;
895 /* Restore the saved pointer. */
896 if (state
== _US_UNWIND_FRAME_RESUME
)
897 data
= (_uw
*) ucbp
->cleanup_cache
.bitpattern
[0];
899 if ((ucbp
->pr_cache
.additional
& 1) == 0)
901 /* Process descriptors. */
909 len
= ((EHT32
*) data
)->length
;
910 offset
= ((EHT32
*) data
)->offset
;
915 len
= ((EHT16
*) data
)->length
;
916 offset
= ((EHT16
*) data
)->offset
;
/* Low bits of offset/len encode the descriptor kind (see switch).  */
920 fnstart
= ucbp
->pr_cache
.fnstart
+ (offset
& ~1);
921 addr
= _Unwind_GetGR (context
, R_PC
);
922 in_range
= (fnstart
<= addr
&& addr
< fnstart
+ (len
& ~1));
924 switch (((offset
& 1) << 1) | (len
& 1))
928 if (state
!= _US_VIRTUAL_UNWIND_FRAME
931 /* Cleanup in range, and we are running cleanups. */
934 /* Landing pad address is 31-bit pc-relative offset. */
935 lp
= selfrel_offset31 (data
);
937 /* Save the exception data pointer. */
938 ucbp
->cleanup_cache
.bitpattern
[0] = (_uw
) data
;
939 if (!__cxa_begin_cleanup (ucbp
))
941 /* Setup the VRS to enter the landing pad. */
942 _Unwind_SetGR (context
, R_PC
, lp
);
943 return _URC_INSTALL_CONTEXT
;
945 /* Cleanup not in range, or we are in stage 1. */
951 if (state
== _US_VIRTUAL_UNWIND_FRAME
)
955 /* Check for a barrier. */
957 bool is_reference
= (data
[0] & uint32_highbit
) != 0;
960 /* Check for no-throw areas. */
961 if (data
[1] == (_uw
) -2)
964 /* The thrown object immediately follows the ECB. */
965 matched
= (void *)(ucbp
+ 1);
966 if (data
[1] != (_uw
) -1)
968 /* Match a catch specification. */
969 rtti
= _Unwind_decode_target2 ((_uw
) &data
[1]);
970 if (!__cxa_type_match (ucbp
, (type_info
*) rtti
,
978 ucbp
->barrier_cache
.sp
=
979 _Unwind_GetGR (context
, R_SP
);
980 ucbp
->barrier_cache
.bitpattern
[0] = (_uw
) matched
;
981 ucbp
->barrier_cache
.bitpattern
[1] = (_uw
) data
;
982 return _URC_HANDLER_FOUND
;
985 /* Handler out of range, or not matched. */
987 else if (ucbp
->barrier_cache
.sp
== _Unwind_GetGR (context
, R_SP
)
988 && ucbp
->barrier_cache
.bitpattern
[1] == (_uw
) data
)
990 /* Matched a previous propagation barrier. */
993 /* Setup for entry to the handler. */
994 lp
= selfrel_offset31 (data
);
995 _Unwind_SetGR (context
, R_PC
, lp
);
996 _Unwind_SetGR (context
, 0, (_uw
) ucbp
);
997 return _URC_INSTALL_CONTEXT
;
999 /* Catch handler not matched. Advance to the next descriptor. */
1004 rtti_count
= data
[0] & 0x7fffffff;
1005 /* Exception specification. */
1006 if (state
== _US_VIRTUAL_UNWIND_FRAME
)
1008 if (in_range
&& (!forced_unwind
|| !rtti_count
))
1010 /* Match against the exception specification. */
1015 for (i
= 0; i
< rtti_count
; i
++)
1017 matched
= (void *)(ucbp
+ 1);
1018 rtti
= _Unwind_decode_target2 ((_uw
) &data
[i
+ 1]);
1019 if (__cxa_type_match (ucbp
, (type_info
*) rtti
, 0,
1024 if (i
== rtti_count
)
1026 /* Exception does not match the spec. */
1027 ucbp
->barrier_cache
.sp
=
1028 _Unwind_GetGR (context
, R_SP
);
1029 ucbp
->barrier_cache
.bitpattern
[0] = (_uw
) matched
;
1030 ucbp
->barrier_cache
.bitpattern
[1] = (_uw
) data
;
1031 return _URC_HANDLER_FOUND
;
1034 /* Handler out of range, or exception is permitted. */
1036 else if (ucbp
->barrier_cache
.sp
== _Unwind_GetGR (context
, R_SP
)
1037 && ucbp
->barrier_cache
.bitpattern
[1] == (_uw
) data
)
1039 /* Matched a previous propagation barrier. */
1041 /* Record the RTTI list for __cxa_call_unexpected. */
1042 ucbp
->barrier_cache
.bitpattern
[1] = rtti_count
;
1043 ucbp
->barrier_cache
.bitpattern
[2] = 0;
1044 ucbp
->barrier_cache
.bitpattern
[3] = 4;
1045 ucbp
->barrier_cache
.bitpattern
[4] = (_uw
) &data
[1];
1047 if (data
[0] & uint32_highbit
)
1048 phase2_call_unexpected_after_unwind
= 1;
1051 data
+= rtti_count
+ 1;
1052 /* Setup for entry to the handler. */
1053 lp
= selfrel_offset31 (data
);
1055 _Unwind_SetGR (context
, R_PC
, lp
);
1056 _Unwind_SetGR (context
, 0, (_uw
) ucbp
);
1057 return _URC_INSTALL_CONTEXT
;
1060 if (data
[0] & uint32_highbit
)
1062 data
+= rtti_count
+ 1;
1066 /* Should never happen. */
1067 return _URC_FAILURE
;
1069 /* Finished processing this descriptor. */
/* Run the frame's unwind opcodes to pop to the caller.  */
1073 if (__gnu_unwind_execute (context
, &uws
) != _URC_OK
)
1074 return _URC_FAILURE
;
1076 if (phase2_call_unexpected_after_unwind
)
1078 /* Enter __cxa_unexpected as if called from the call site. */
1079 _Unwind_SetGR (context
, R_LR
, _Unwind_GetGR (context
, R_PC
));
1080 _Unwind_SetGR (context
, R_PC
, (_uw
) &__cxa_call_unexpected
);
1081 return _URC_INSTALL_CONTEXT
;
1084 return _URC_CONTINUE_UNWIND
;
1088 /* ABI defined personality routine entry points. */
/* NOTE(review): the return-type lines and braces are missing from this
   view.  Each routine is a thin wrapper that forwards to
   __gnu_unwind_pr_common with its table-format index (0, 1 or 2).  */
1091 __aeabi_unwind_cpp_pr0 (_Unwind_State state
,
1092 _Unwind_Control_Block
*ucbp
,
1093 _Unwind_Context
*context
)
1095 return __gnu_unwind_pr_common (state
, ucbp
, context
, 0);
1099 __aeabi_unwind_cpp_pr1 (_Unwind_State state
,
1100 _Unwind_Control_Block
*ucbp
,
1101 _Unwind_Context
*context
)
1103 return __gnu_unwind_pr_common (state
, ucbp
, context
, 1);
1107 __aeabi_unwind_cpp_pr2 (_Unwind_State state
,
1108 _Unwind_Control_Block
*ucbp
,
1109 _Unwind_Context
*context
)
1111 return __gnu_unwind_pr_common (state
, ucbp
, context
, 2);