/* ARM EABI compliant unwinding routines.
   Copyright (C) 2004, 2005  Free Software Foundation, Inc.
   Contributed by Paul Brook

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2, or (at your option) any
   later version.

   In addition to the permissions in the GNU General Public License, the
   Free Software Foundation gives you unlimited permission to link the
   compiled version of this file into combinations with other programs,
   and to distribute those combinations without any restriction coming
   from the use of this file.  (The General Public License restrictions
   do apply in other respects; for example, they cover modification of
   the file, and distribution when not linked into a combined
   executable.)

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "unwind.h"

/* We add a prototype for abort here to avoid creating a dependency on
   target headers.  */
extern void abort (void);

/* Definitions for C++ runtime support routines.  We make these weak
   declarations to avoid pulling in libsupc++ unnecessarily.  */
typedef unsigned char bool;

typedef struct _ZSt9type_info type_info; /* This names C++ type_info type */

void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp);
bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp);
bool __attribute__((weak)) __cxa_type_match(_Unwind_Control_Block *ucbp,
                                            const type_info *rttip,
                                            void **matched_object);

_Unwind_Ptr __attribute__((weak))
__gnu_Unwind_Find_exidx (_Unwind_Ptr, int *);

/* Misc constants.  */
#define R_IP 12
#define R_SP 13
#define R_LR 14
#define R_PC 15

#define EXIDX_CANTUNWIND 1
#define uint32_highbit (((_uw) 1) << 31)

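/* Accessors for the per-exception state that the unwinder keeps in the
   otherwise opaque unwinder_cache words of the control block: the
   forced-unwind stop function and its argument, the cached personality
   routine address, and the call-site address saved between unwind
   steps.  */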
#define UCB_FORCED_STOP_FN(ucbp) ((ucbp)->unwinder_cache.reserved1)
#define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2)
#define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3)
#define UCB_FORCED_STOP_ARG(ucbp) ((ucbp)->unwinder_cache.reserved4)

struct core_regs
{
  _uw r[16];
};

/* We use normal integer types here to avoid the compiler generating
   coprocessor instructions.  */
struct vfp_regs
{
  _uw64 d[16];
  _uw pad;
};

struct fpa_reg
{
  _uw w[3];
};

struct fpa_regs
{
  struct fpa_reg f[8];
};

/* Unwind descriptors.  */

typedef struct
{
  _uw16 length;
  _uw16 offset;
} EHT16;

typedef struct
{
  _uw length;
  _uw offset;
} EHT32;

/* The ABI specifies that the unwind routines may only use core registers,
   except when actually manipulating coprocessor state.  This allows
   us to write one implementation that works on all platforms by
   demand-saving coprocessor registers.

   During unwinding we hold the coprocessor state in the actual hardware
   registers and allocate demand-save areas for use during phase1
   unwinding.  */

typedef struct
{
  /* The first fields must be the same as a phase2_vrs.  */
  _uw demand_save_flags;
  struct core_regs core;
  _uw prev_sp; /* Only valid during forced unwinding.  */
  struct vfp_regs vfp;
  struct fpa_regs fpa;
} phase1_vrs;

#define DEMAND_SAVE_VFP 1

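/* While this flag is set in demand_save_flags the VFP registers still
   hold the values they had on entry to the unwinder; they are copied
   into the vfp area (and the flag cleared) only if the unwind opcodes
   actually touch VFP state, and restore_non_core_regs reloads them from
   there afterwards.  */
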
/* This must match the structure created by the assembly wrappers.  */
typedef struct
{
  _uw demand_save_flags;
  struct core_regs core;
} phase2_vrs;

/* An exception index table entry.  */

typedef struct __EIT_entry
{
  _uw fnoffset;
  _uw content;
} __EIT_entry;

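/* fnoffset is a 31-bit self-relative offset to the start of the function.
   content is either EXIDX_CANTUNWIND, an inline entry with the high bit
   set (the unwind data lives in the word itself), or a 31-bit
   self-relative offset to an out-of-line exception handling table
   entry.  */
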
/* Assembly helper functions.  */

/* Restore core register state.  Never returns.  */
void __attribute__((noreturn)) restore_core_regs (struct core_regs *);

/* Coprocessor register state manipulation functions.  */
void __gnu_Unwind_Save_VFP (struct vfp_regs * p);
void __gnu_Unwind_Restore_VFP (struct vfp_regs * p);

/* Restore coprocessor state after phase1 unwinding.  */
static void
restore_non_core_regs (phase1_vrs * vrs)
{
  if ((vrs->demand_save_flags & DEMAND_SAVE_VFP) == 0)
    __gnu_Unwind_Restore_VFP (&vrs->vfp);
}

/* A better way to do this would probably be to compare the absolute address
   with a segment relative relocation of the same symbol.  */
extern int __text_start;
extern int __data_start;

/* The exception index table location.  */
extern __EIT_entry __exidx_start;
extern __EIT_entry __exidx_end;

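/* __exidx_start and __exidx_end are linker-provided symbols delimiting
   the .ARM.exidx exception index table; they are used as the search
   table when no __gnu_Unwind_Find_exidx implementation is linked in.  */
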
/* ABI defined personality routines.  */
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr0 (_Unwind_State,
    _Unwind_Control_Block *, _Unwind_Context *);// __attribute__((weak));
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr1 (_Unwind_State,
    _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr2 (_Unwind_State,
    _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));

/* ABI defined routine to store a virtual register to memory.  */

_Unwind_VRS_Result _Unwind_VRS_Get (_Unwind_Context *context,
                                    _Unwind_VRS_RegClass regclass,
                                    _uw regno,
                                    _Unwind_VRS_DataRepresentation representation,
                                    void *valuep)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  switch (regclass)
    {
    case _UVRSC_CORE:
      if (representation != _UVRSD_UINT32
          || regno > 15)
        return _UVRSR_FAILED;
      *(_uw *) valuep = vrs->core.r[regno];
      return _UVRSR_OK;

    case _UVRSC_VFP:
    case _UVRSC_FPA:
    case _UVRSC_WMMXD:
    case _UVRSC_WMMXC:
      return _UVRSR_NOT_IMPLEMENTED;

    default:
      return _UVRSR_FAILED;
    }
}

/* ABI defined function to load a virtual register from memory.  */

_Unwind_VRS_Result _Unwind_VRS_Set (_Unwind_Context *context,
                                    _Unwind_VRS_RegClass regclass,
                                    _uw regno,
                                    _Unwind_VRS_DataRepresentation representation,
                                    void *valuep)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  switch (regclass)
    {
    case _UVRSC_CORE:
      if (representation != _UVRSD_UINT32
          || regno > 15)
        return _UVRSR_FAILED;

      vrs->core.r[regno] = *(_uw *) valuep;
      return _UVRSR_OK;

    case _UVRSC_VFP:
    case _UVRSC_FPA:
    case _UVRSC_WMMXD:
    case _UVRSC_WMMXC:
      return _UVRSR_NOT_IMPLEMENTED;

    default:
      return _UVRSR_FAILED;
    }
}

/* ABI defined function to pop registers off the stack.  */

_Unwind_VRS_Result _Unwind_VRS_Pop (_Unwind_Context *context,
                                    _Unwind_VRS_RegClass regclass,
                                    _uw discriminator,
                                    _Unwind_VRS_DataRepresentation representation)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  switch (regclass)
    {
    case _UVRSC_CORE:
      {
        _uw *ptr;
        _uw mask;
        int i;

        if (representation != _UVRSD_UINT32)
          return _UVRSR_FAILED;

        mask = discriminator & 0xffff;
        ptr = (_uw *) vrs->core.r[R_SP];
        /* Pop the requested registers.  */
        for (i = 0; i < 16; i++)
          {
            if (mask & (1 << i))
              vrs->core.r[i] = *(ptr++);
          }
        /* Writeback the stack pointer value if it wasn't restored.  */
        if ((mask & (1 << R_SP)) == 0)
          vrs->core.r[R_SP] = (_uw) ptr;

        return _UVRSR_OK;
      }

    case _UVRSC_VFP:
      {
        _uw start = discriminator >> 16;
        _uw count = discriminator & 0xffff;
        struct vfp_regs tmp;
        _uw *sp;
        _uw *dest;

        if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE)
            || start + count > 16)
          return _UVRSR_FAILED;

        if (vrs->demand_save_flags & DEMAND_SAVE_VFP)
          {
            /* Demand-save registers for stage1.  */
            vrs->demand_save_flags &= ~DEMAND_SAVE_VFP;
            __gnu_Unwind_Save_VFP (&vrs->vfp);
          }

        /* Restore the registers from the stack.  Do this by saving the
           current VFP registers to a memory area, moving the in-memory
           values into that area, and restoring from the whole area.
           For _UVRSD_VFPX we assume FSTMX standard format 1.  */
        __gnu_Unwind_Save_VFP (&tmp);

        /* The stack address is only guaranteed to be word aligned, so
           we can't use doubleword copies.  */
        sp = (_uw *) vrs->core.r[R_SP];
        dest = (_uw *) &tmp.d[start];
        count *= 2;
        while (count--)
          *(dest++) = *(sp++);

        /* Skip the pad word.  */
        if (representation == _UVRSD_VFPX)
          sp++;

        /* Set the new stack pointer.  */
        vrs->core.r[R_SP] = (_uw) sp;

        /* Reload the registers.  */
        __gnu_Unwind_Restore_VFP (&tmp);
      }
      return _UVRSR_OK;

    case _UVRSC_FPA:
    case _UVRSC_WMMXD:
    case _UVRSC_WMMXC:
      return _UVRSR_NOT_IMPLEMENTED;

    default:
      return _UVRSR_FAILED;
    }
}

/* Core unwinding functions.  */

/* Calculate the address encoded by a 31-bit self-relative offset at address
   P.  */
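/* For example, if the word at address 0x8004 holds 0x7ffffffc, the value
   sign-extends to -4 and the encoded address is 0x8000.  */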
static inline _uw
selfrel_offset31 (const _uw *p)
{
  _uw offset;

  offset = *p;
  /* Sign extend to 32 bits.  */
  if (offset & (1 << 30))
    offset |= 1u << 31;
  else
    offset &= ~(1u << 31);

  return offset + (_uw) p;
}

/* Perform a binary search for RETURN_ADDRESS in TABLE.  The table contains
   NREC entries.  */

static const __EIT_entry *
search_EIT_table (const __EIT_entry * table, int nrec, _uw return_address)
{
  _uw next_fn;
  _uw this_fn;
  int n, left, right;

  if (nrec == 0)
    return (__EIT_entry *) 0;

  left = 0;
  right = nrec - 1;

  while (1)
    {
      n = (left + right) / 2;
      this_fn = selfrel_offset31 (&table[n].fnoffset);
      if (n != nrec - 1)
        next_fn = selfrel_offset31 (&table[n + 1].fnoffset) - 1;
      else
        next_fn = (_uw) 0 - 1;

      if (return_address < this_fn)
        {
          if (n == left)
            return (__EIT_entry *) 0;
          right = n - 1;
        }
      else if (return_address <= next_fn)
        return &table[n];
      else
        left = n + 1;
    }
}

/* Find the exception index table entry for the given address.
   Fill in the relevant fields of the UCB.
   Returns _URC_FAILURE if an error occurred, _URC_OK on success.  */

static _Unwind_Reason_Code
get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
{
  const __EIT_entry * eitp;
  int nrec;

  /* The return address is the address of the instruction following the
     call instruction (plus one in thumb mode).  If this was the last
     instruction in the function the address will lie in the following
     function.  Subtract 2 from the address so that it points within the call
     instruction itself.  */
  return_address -= 2;

  if (__gnu_Unwind_Find_exidx)
    {
      eitp = (const __EIT_entry *) __gnu_Unwind_Find_exidx (return_address,
                                                            &nrec);
      if (!eitp)
        {
          UCB_PR_ADDR (ucbp) = 0;
          return _URC_FAILURE;
        }
    }
  else
    {
      eitp = &__exidx_start;
      nrec = &__exidx_end - &__exidx_start;
    }

  eitp = search_EIT_table (eitp, nrec, return_address);

  if (!eitp)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_FAILURE;
    }
  ucbp->pr_cache.fnstart = selfrel_offset31 (&eitp->fnoffset);

  /* Can this frame be unwound at all?  */
  if (eitp->content == EXIDX_CANTUNWIND)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_END_OF_STACK;
    }

  /* Obtain the address of the "real" __EHT_Header word.  */

  if (eitp->content & uint32_highbit)
    {
      /* It is immediate data.  */
      ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *) &eitp->content;
      ucbp->pr_cache.additional = 1;
    }
  else
    {
      /* The low 31 bits of the content field are a self-relative
         offset to an _Unwind_EHT_Entry structure.  */
      ucbp->pr_cache.ehtp =
        (_Unwind_EHT_Header *) selfrel_offset31 (&eitp->content);
      ucbp->pr_cache.additional = 0;
    }

  /* Discover the personality routine address.  */
  if (*ucbp->pr_cache.ehtp & (1u << 31))
    {
      /* One of the predefined standard routines.  */
      _uw idx = (*(_uw *) ucbp->pr_cache.ehtp >> 24) & 0xf;
      if (idx == 0)
        UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr0;
      else if (idx == 1)
        UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr1;
      else if (idx == 2)
        UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr2;
      else
        { /* Failed */
          UCB_PR_ADDR (ucbp) = 0;
          return _URC_FAILURE;
        }
    }
  else
    {
      /* Execute region offset to PR.  */
      UCB_PR_ADDR (ucbp) = selfrel_offset31 (ucbp->pr_cache.ehtp);
    }
  return _URC_OK;
}

/* Perform phase2 unwinding.  VRS is the initial virtual register state.  */

static void __attribute__((noreturn))
unwind_phase2 (_Unwind_Control_Block * ucbp, phase2_vrs * vrs)
{
  _Unwind_Reason_Code pr_result;

  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, vrs->core.r[R_PC]) != _URC_OK)
        abort ();

      UCB_SAVED_CALLSITE_ADDR (ucbp) = vrs->core.r[R_PC];

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
        (_US_UNWIND_FRAME_STARTING, ucbp, (_Unwind_Context *) vrs);
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  if (pr_result != _URC_INSTALL_CONTEXT)
    abort ();

  restore_core_regs (&vrs->core);
}

/* Perform phase2 forced unwinding.  */

static _Unwind_Reason_Code
unwind_phase2_forced (_Unwind_Control_Block *ucbp, phase2_vrs *entry_vrs,
                      int resuming)
{
  _Unwind_Stop_Fn stop_fn = (_Unwind_Stop_Fn) UCB_FORCED_STOP_FN (ucbp);
  void *stop_arg = (void *) UCB_FORCED_STOP_ARG (ucbp);
  _Unwind_Reason_Code pr_result = 0;
  /* We use phase1_vrs here even though we do not demand-save, for the
     prev_sp field.  */
  phase1_vrs saved_vrs, next_vrs;

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* We don't need to demand-save the non-core registers, because we
     unwind in a single pass.  */
  saved_vrs.demand_save_flags = 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      _Unwind_State action;
      _Unwind_Reason_Code entry_code;
      _Unwind_Reason_Code stop_code;

      /* Find the entry for this routine.  */
      entry_code = get_eit_entry (ucbp, saved_vrs.core.r[R_PC]);

      if (resuming)
        {
          action = _US_UNWIND_FRAME_RESUME | _US_FORCE_UNWIND;
          resuming = 0;
        }
      else
        action = _US_UNWIND_FRAME_STARTING | _US_FORCE_UNWIND;

      if (entry_code == _URC_OK)
        {
          UCB_SAVED_CALLSITE_ADDR (ucbp) = saved_vrs.core.r[R_PC];

          next_vrs = saved_vrs;

          /* Call the pr to decide what to do.  */
          pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
            (action, ucbp, (void *) &next_vrs);

          saved_vrs.prev_sp = next_vrs.core.r[R_SP];
        }
      else
        {
          /* Treat any failure as the end of unwinding, to cope more
             gracefully with missing EH information.  Mixed EH and
             non-EH within one object will usually result in failure,
             because the .ARM.exidx tables do not indicate the end
             of the code to which they apply; but mixed EH and non-EH
             shared objects should return an unwind failure at the
             entry of a non-EH shared object.  */
          action |= _US_END_OF_STACK;

          saved_vrs.prev_sp = saved_vrs.core.r[R_SP];
        }

      stop_code = stop_fn (1, action, ucbp->exception_class, ucbp,
                           (void *) &saved_vrs, stop_arg);
      if (stop_code != _URC_NO_REASON)
        return _URC_FAILURE;

      if (entry_code != _URC_OK)
        return entry_code;

      saved_vrs = next_vrs;
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  if (pr_result != _URC_INSTALL_CONTEXT)
    {
      /* Some sort of failure has occurred in the pr and probably the
         pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  restore_core_regs (&saved_vrs.core);
}

/* This is a very limited implementation of _Unwind_GetCFA.  It returns
   the stack pointer as it is about to be unwound, and is only valid
   while calling the stop function during forced unwinding.  If the
   current personality routine result is going to run a cleanup, this
   will not be the CFA; but when the frame is really unwound, it will
   be.  */

_Unwind_Word
_Unwind_GetCFA (_Unwind_Context *context)
{
  return ((phase1_vrs *) context)->prev_sp;
}

/* Perform phase1 unwinding.  UCBP is the exception being thrown, and
   ENTRY_VRS is the register state on entry to _Unwind_RaiseException.  */

_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block * ucbp,
                             phase2_vrs * entry_vrs)
{
  phase1_vrs saved_vrs;
  _Unwind_Reason_Code pr_result;

  /* Set the pc to the call site.  */
  entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* Set demand-save flags.  */
  saved_vrs.demand_save_flags = ~(_uw) 0;

  /* Unwind until we reach a propagation barrier.  */
  do
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK)
        return _URC_FAILURE;

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
        (_US_VIRTUAL_UNWIND_FRAME, ucbp, (void *) &saved_vrs);
    }
  while (pr_result == _URC_CONTINUE_UNWIND);

  /* We've unwound as far as we want to go, so restore the original
     register state.  */
  restore_non_core_regs (&saved_vrs);
  if (pr_result != _URC_HANDLER_FOUND)
    {
      /* Some sort of failure has occurred in the pr and probably the
         pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  unwind_phase2 (ucbp, entry_vrs);
}

/* Perform forced unwinding.  UCBP is the exception being thrown, STOP_FN
   and STOP_ARG are the caller-supplied stop function and its argument, and
   ENTRY_VRS is the register state on entry to _Unwind_ForcedUnwind.  */
_Unwind_Reason_Code
__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *,
                           _Unwind_Stop_Fn, void *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_ForcedUnwind (_Unwind_Control_Block *ucbp,
                           _Unwind_Stop_Fn stop_fn, void *stop_arg,
                           phase2_vrs *entry_vrs)
{
  UCB_FORCED_STOP_FN (ucbp) = (_uw) stop_fn;
  UCB_FORCED_STOP_ARG (ucbp) = (_uw) stop_arg;

  /* Set the pc to the call site.  */
  entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];

  return unwind_phase2_forced (ucbp, entry_vrs, 0);
}

/* Resume unwinding after a cleanup has been run.  UCBP is the exception
   being thrown and ENTRY_VRS is the register state on entry to
   _Unwind_Resume.  */
_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block * ucbp, phase2_vrs * entry_vrs)
{
  _Unwind_Reason_Code pr_result;

  /* Recover the saved address.  */
  entry_vrs->core.r[R_PC] = UCB_SAVED_CALLSITE_ADDR (ucbp);

  if (UCB_FORCED_STOP_FN (ucbp))
    {
      unwind_phase2_forced (ucbp, entry_vrs, 1);

      /* We can't return failure at this point.  */
      abort ();
    }

  /* Call the cached PR.  */
  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
    (_US_UNWIND_FRAME_RESUME, ucbp, (_Unwind_Context *) entry_vrs);

  switch (pr_result)
    {
    case _URC_INSTALL_CONTEXT:
      /* Upload the registers to enter the landing pad.  */
      restore_core_regs (&entry_vrs->core);

    case _URC_CONTINUE_UNWIND:
      /* Continue unwinding the next frame.  */
      unwind_phase2 (ucbp, entry_vrs);

    default:
      abort ();
    }
}

_Unwind_Reason_Code
__gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_Resume_or_Rethrow (_Unwind_Control_Block * ucbp,
                                phase2_vrs * entry_vrs)
{
  if (!UCB_FORCED_STOP_FN (ucbp))
    return __gnu_Unwind_RaiseException (ucbp, entry_vrs);

  /* Set the pc to the call site.  */
  entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];
  /* Continue unwinding the next frame.  */
  return unwind_phase2_forced (ucbp, entry_vrs, 0);
}

/* Clean up an exception object when unwinding is complete.  */
void
_Unwind_Complete (_Unwind_Control_Block * ucbp __attribute__((unused)))
{
}

/* Get the _Unwind_Control_Block from an _Unwind_Context.  */

static inline _Unwind_Control_Block *
unwind_UCB_from_context (_Unwind_Context * context)
{
  return (_Unwind_Control_Block *) _Unwind_GetGR (context, R_IP);
}

/* Free an exception.  */

void
_Unwind_DeleteException (_Unwind_Exception * exc)
{
  if (exc->exception_cleanup)
    (*exc->exception_cleanup) (_URC_FOREIGN_EXCEPTION_CAUGHT, exc);
}

/* Common implementation for ARM ABI defined personality routines.
   ID is the index of the personality routine, other arguments are as defined
   by __aeabi_unwind_cpp_pr{0,1,2}.  */
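/* The three routines differ only in how the unwind data is laid out:
   pr0 keeps up to three unwind opcode bytes in the first table word,
   while pr1 and pr2 store an extra-word count and two opcode bytes
   there, with further opcodes in the following words, and pr2 uses
   32-bit descriptor headers (EHT32) instead of 16-bit ones (EHT16).
   The unwind opcodes themselves are executed by __gnu_unwind_execute,
   called near the end of this function.  */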
static _Unwind_Reason_Code
__gnu_unwind_pr_common (_Unwind_State state,
                        _Unwind_Control_Block *ucbp,
                        _Unwind_Context *context,
                        int id)
{
  __gnu_unwind_state uws;
  _uw *data;
  _uw offset;
  _uw len;
  _uw rtti_count;
  int phase2_call_unexpected_after_unwind = 0;
  int in_range = 0;
  int forced_unwind = state & _US_FORCE_UNWIND;

  state &= _US_ACTION_MASK;

  data = (_uw *) ucbp->pr_cache.ehtp;
  uws.data = *(data++);
  uws.next = data;
  if (id == 0)
    {
      uws.data <<= 8;
      uws.words_left = 0;
      uws.bytes_left = 3;
    }
  else
    {
      uws.words_left = (uws.data >> 16) & 0xff;
      uws.data <<= 16;
      uws.bytes_left = 2;
      data += uws.words_left;
    }

  /* Restore the saved pointer.  */
  if (state == _US_UNWIND_FRAME_RESUME)
    data = (_uw *) ucbp->cleanup_cache.bitpattern[0];

  if ((ucbp->pr_cache.additional & 1) == 0)
    {
      /* Process descriptors.  */
      while (*data)
        {
          _uw addr;
          _uw fnstart;

          if (id == 2)
            {
              len = ((EHT32 *) data)->length;
              offset = ((EHT32 *) data)->offset;
              data += 2;
            }
          else
            {
              len = ((EHT16 *) data)->length;
              offset = ((EHT16 *) data)->offset;
              data++;
            }

          fnstart = ucbp->pr_cache.fnstart + (offset & ~1);
          addr = _Unwind_GetGR (context, R_PC);
          in_range = (fnstart <= addr && addr < fnstart + (len & ~1));

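          /* The low bits of the offset and length words select the
             descriptor kind: 0 is a cleanup, 1 a catch handler, and
             2 an exception specification.  */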
          switch (((offset & 1) << 1) | (len & 1))
            {
            case 0:
              /* Cleanup.  */
              if (state != _US_VIRTUAL_UNWIND_FRAME
                  && in_range)
                {
                  /* Cleanup in range, and we are running cleanups.  */
                  _uw lp;

                  /* Landing pad address is 31-bit pc-relative offset.  */
                  lp = selfrel_offset31 (data);
                  data++;
                  /* Save the exception data pointer.  */
                  ucbp->cleanup_cache.bitpattern[0] = (_uw) data;
                  if (!__cxa_begin_cleanup (ucbp))
                    return _URC_FAILURE;
                  /* Setup the VRS to enter the landing pad.  */
                  _Unwind_SetGR (context, R_PC, lp);
                  return _URC_INSTALL_CONTEXT;
                }
              /* Cleanup not in range, or we are in stage 1.  */
              data++;
              break;

            case 1:
              /* Catch handler.  */
              if (state == _US_VIRTUAL_UNWIND_FRAME)
                {
                  if (in_range)
                    {
                      /* Check for a barrier.  */
                      _uw rtti;
                      void *matched;

                      /* Check for no-throw areas.  */
                      if (data[1] == (_uw) -2)
                        return _URC_FAILURE;

                      /* The thrown object immediately follows the ECB.  */
                      matched = (void *) (ucbp + 1);
                      if (data[1] != (_uw) -1)
                        {
                          /* Match a catch specification.  */
                          rtti = _Unwind_decode_target2 ((_uw) &data[1]);
                          if (!__cxa_type_match (ucbp, (type_info *) rtti,
                                                 &matched))
                            matched = (void *) 0;
                        }

                      if (matched)
                        {
                          ucbp->barrier_cache.sp =
                            _Unwind_GetGR (context, R_SP);
                          ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
                          ucbp->barrier_cache.bitpattern[1] = (_uw) data;
                          return _URC_HANDLER_FOUND;
                        }
                    }
                  /* Handler out of range, or not matched.  */
                }
              else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
                       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
                {
                  /* Matched a previous propagation barrier.  */
                  _uw lp;

                  /* Setup for entry to the handler.  */
                  lp = selfrel_offset31 (data);
                  _Unwind_SetGR (context, R_PC, lp);
                  _Unwind_SetGR (context, 0, (_uw) ucbp);
                  return _URC_INSTALL_CONTEXT;
                }
              /* Catch handler not matched.  Advance to the next descriptor.  */
              data += 2;
              break;

            case 2:
              rtti_count = data[0] & 0x7fffffff;
              /* Exception specification.  */
              if (state == _US_VIRTUAL_UNWIND_FRAME)
                {
                  if (in_range && (!forced_unwind || !rtti_count))
                    {
                      /* Match against the exception specification.  */
                      _uw i;
                      _uw rtti;
                      void *matched;

                      for (i = 0; i < rtti_count; i++)
                        {
                          matched = (void *) (ucbp + 1);
                          rtti = _Unwind_decode_target2 ((_uw) &data[i + 1]);
                          if (__cxa_type_match (ucbp, (type_info *) rtti,
                                                &matched))
                            break;
                        }

                      if (i == rtti_count)
                        {
                          /* Exception does not match the spec.  */
                          ucbp->barrier_cache.sp =
                            _Unwind_GetGR (context, R_SP);
                          ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
                          ucbp->barrier_cache.bitpattern[1] = (_uw) data;
                          return _URC_HANDLER_FOUND;
                        }
                    }
                  /* Handler out of range, or exception is permitted.  */
                }
              else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
                       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
                {
                  /* Matched a previous propagation barrier.  */
                  _uw lp;

                  /* Record the RTTI list for __cxa_call_unexpected.  */
                  ucbp->barrier_cache.bitpattern[1] = rtti_count;
                  ucbp->barrier_cache.bitpattern[2] = 0;
                  ucbp->barrier_cache.bitpattern[3] = 4;
                  ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];

                  if (data[0] & uint32_highbit)
                    phase2_call_unexpected_after_unwind = 1;
                  else
                    {
                      data += rtti_count + 1;
                      /* Setup for entry to the handler.  */
                      lp = selfrel_offset31 (data);
                      data++;
                      _Unwind_SetGR (context, R_PC, lp);
                      _Unwind_SetGR (context, 0, (_uw) ucbp);
                      return _URC_INSTALL_CONTEXT;
                    }
                }
              if (data[0] & uint32_highbit)
                data++;
              data += rtti_count + 1;
              break;

            default:
              /* Should never happen.  */
              return _URC_FAILURE;
            }
          /* Finished processing this descriptor.  */
        }
    }

  if (__gnu_unwind_execute (context, &uws) != _URC_OK)
    return _URC_FAILURE;

  if (phase2_call_unexpected_after_unwind)
    {
      /* Enter __cxa_call_unexpected as if called from the call site.  */
      _Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
      _Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
      return _URC_INSTALL_CONTEXT;
    }

  return _URC_CONTINUE_UNWIND;
}

/* ABI defined personality routine entry points.  */

_Unwind_Reason_Code
__aeabi_unwind_cpp_pr0 (_Unwind_State state,
                        _Unwind_Control_Block *ucbp,
                        _Unwind_Context *context)
{
  return __gnu_unwind_pr_common (state, ucbp, context, 0);
}

_Unwind_Reason_Code
__aeabi_unwind_cpp_pr1 (_Unwind_State state,
                        _Unwind_Control_Block *ucbp,
                        _Unwind_Context *context)
{
  return __gnu_unwind_pr_common (state, ucbp, context, 1);
}

_Unwind_Reason_Code
__aeabi_unwind_cpp_pr2 (_Unwind_State state,
                        _Unwind_Control_Block *ucbp,
                        _Unwind_Context *context)
{
  return __gnu_unwind_pr_common (state, ucbp, context, 2);
}