/* ARM EABI compliant unwinding routines.
   Copyright (C) 2004, 2005 Free Software Foundation, Inc.
   Contributed by Paul Brook

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2, or (at your option) any
   later version.

   In addition to the permissions in the GNU General Public License, the
   Free Software Foundation gives you unlimited permission to link the
   compiled version of this file into combinations with other programs,
   and to distribute those combinations without any restriction coming
   from the use of this file.  (The General Public License restrictions
   do apply in other respects; for example, they cover modification of
   the file, and distribution when not linked into a combine
   executable.)

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "unwind.h"

/* Definitions for C++ runtime support routines.  We make these weak
   declarations to avoid pulling in libsupc++ unnecessarily.  */
typedef unsigned char bool;

typedef struct _ZSt9type_info type_info;  /* This names C++ type_info type.  */

void __attribute__((weak)) __cxa_call_unexpected(_Unwind_Control_Block *ucbp);
bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp);
bool __attribute__((weak)) __cxa_type_match(_Unwind_Control_Block *ucbp,
                                            const type_info *rttip,
                                            void **matched_object);

_Unwind_Ptr __attribute__((weak))
__gnu_Unwind_Find_exidx (_Unwind_Ptr, int *);

/* Misc constants.  */
#define R_IP 12
#define R_SP 13
#define R_LR 14
#define R_PC 15

#define EXIDX_CANTUNWIND 1
#define uint32_highbit (((_uw) 1) << 31)

#define UCB_PR_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved2)
#define UCB_SAVED_CALLSITE_ADDR(ucbp) ((ucbp)->unwinder_cache.reserved3)
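
/* Note: the unwinder_cache words of the _Unwind_Control_Block are intended
   for the unwinder's private use, and this file uses two of them: reserved2
   caches the personality routine address discovered by get_eit_entry, and
   reserved3 caches the call-site address saved between phase 2 steps (see
   unwind_phase2 and __gnu_Unwind_Resume below).  */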

struct core_regs
{
  _uw r[16];
};

/* We use normal integer types here to avoid the compiler generating
   coprocessor instructions.  */
struct vfp_regs
{
  _uw64 d[16];
  _uw pad;
};

struct fpa_reg
{
  _uw w[3];
};

struct fpa_regs
{
  struct fpa_reg f[8];
};

/* Unwind descriptors.  */

typedef struct
{
  _uw16 length;
  _uw16 offset;
} EHT16;

typedef struct
{
  _uw length;
  _uw offset;
} EHT32;

/* The ABI specifies that the unwind routines may only use core registers,
   except when actually manipulating coprocessor state.  This allows
   us to write one implementation that works on all platforms by
   demand-saving coprocessor registers.

   During unwinding we hold the coprocessor state in the actual hardware
   registers and allocate demand-save areas for use during phase1
   unwinding.  */

typedef struct
{
  /* The first fields must be the same as a phase2_vrs.  */
  _uw demand_save_flags;
  struct core_regs core;
  struct vfp_regs vfp;
  struct fpa_regs fpa;
} phase1_vrs;

#define DEMAND_SAVE_VFP 1

/* This must match the structure created by the assembly wrappers.  */
typedef struct
{
  _uw demand_save_flags;
  struct core_regs core;
} phase2_vrs;

/* An exception index table entry.  */

typedef struct __EIT_entry
{
  _uw fnoffset;
  _uw content;
} __EIT_entry;

/* Assembly helper functions.  */

/* Restore core register state.  Never returns.  */
void __attribute__((noreturn)) restore_core_regs (struct core_regs *);

/* Coprocessor register state manipulation functions.  */

void __gnu_Unwind_Save_VFP (struct vfp_regs * p);
void __gnu_Unwind_Restore_VFP (struct vfp_regs * p);

/* Restore coprocessor state after phase1 unwinding.  */
static void
restore_non_core_regs (phase1_vrs * vrs)
{
  if ((vrs->demand_save_flags & DEMAND_SAVE_VFP) == 0)
    __gnu_Unwind_Restore_VFP (&vrs->vfp);
}
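
/* The demand-save convention used here: a set bit in demand_save_flags
   means the corresponding coprocessor state is still live in the hardware
   registers and nothing needs restoring.  The bit is cleared the first time
   _Unwind_VRS_Pop has to touch that state, after saving the original values
   into the demand-save area, so restore_non_core_regs only reloads state
   that was actually disturbed during phase 1.  */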

/* A better way to do this would probably be to compare the absolute address
   with a segment relative relocation of the same symbol.  */

extern int __text_start;
extern int __data_start;

/* The exception index table location.  */
extern __EIT_entry __exidx_start;
extern __EIT_entry __exidx_end;

/* ABI defined personality routines.  */
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr0 (_Unwind_State,
    _Unwind_Control_Block *, _Unwind_Context *);// __attribute__((weak));
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr1 (_Unwind_State,
    _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));
extern _Unwind_Reason_Code __aeabi_unwind_cpp_pr2 (_Unwind_State,
    _Unwind_Control_Block *, _Unwind_Context *) __attribute__((weak));

/* ABI defined routine to store a virtual register to memory.  */

_Unwind_VRS_Result _Unwind_VRS_Get (_Unwind_Context *context,
                                    _Unwind_VRS_RegClass regclass,
                                    _uw regno,
                                    _Unwind_VRS_DataRepresentation representation,
                                    void *valuep)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  switch (regclass)
    {
    case _UVRSC_CORE:
      if (representation != _UVRSD_UINT32
          || regno > 15)
        return _UVRSR_FAILED;
      *(_uw *) valuep = vrs->core.r[regno];
      return _UVRSR_OK;

    case _UVRSC_VFP:
    case _UVRSC_FPA:
    case _UVRSC_WMMXD:
    case _UVRSC_WMMXC:
      return _UVRSR_NOT_IMPLEMENTED;

    default:
      return _UVRSR_FAILED;
    }
}

/* ABI defined function to load a virtual register from memory.  */

_Unwind_VRS_Result _Unwind_VRS_Set (_Unwind_Context *context,
                                    _Unwind_VRS_RegClass regclass,
                                    _uw regno,
                                    _Unwind_VRS_DataRepresentation representation,
                                    void *valuep)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  switch (regclass)
    {
    case _UVRSC_CORE:
      if (representation != _UVRSD_UINT32
          || regno > 15)
        return _UVRSR_FAILED;

      vrs->core.r[regno] = *(_uw *) valuep;
      return _UVRSR_OK;

    case _UVRSC_VFP:
    case _UVRSC_FPA:
    case _UVRSC_WMMXD:
    case _UVRSC_WMMXC:
      return _UVRSR_NOT_IMPLEMENTED;

    default:
      return _UVRSR_FAILED;
    }
}
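
/* Example usage (illustrative only, not part of the original sources):
   a personality routine that wants to pass the UCB in r0 and branch to a
   landing pad could use the accessors above roughly as

     _uw r0val = (_uw) ucbp;   -- pass the UCB in r0
     _uw lp = ...;             -- hypothetical landing pad address
     _Unwind_VRS_Set (context, _UVRSC_CORE, 0, _UVRSD_UINT32, &r0val);
     _Unwind_VRS_Set (context, _UVRSC_CORE, R_PC, _UVRSD_UINT32, &lp);

   which is equivalent to the _Unwind_SetGR calls made by
   __gnu_unwind_pr_common below.  */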

/* ABI defined function to pop registers off the stack.  */

_Unwind_VRS_Result _Unwind_VRS_Pop (_Unwind_Context *context,
                                    _Unwind_VRS_RegClass regclass,
                                    _uw discriminator,
                                    _Unwind_VRS_DataRepresentation representation)
{
  phase1_vrs *vrs = (phase1_vrs *) context;

  switch (regclass)
    {
    case _UVRSC_CORE:
      {
        _uw *ptr;
        _uw mask;
        int i;

        if (representation != _UVRSD_UINT32)
          return _UVRSR_FAILED;

        mask = discriminator & 0xffff;
        ptr = (_uw *) vrs->core.r[R_SP];
        /* Pop the requested registers.  */
        for (i = 0; i < 16; i++)
          {
            if (mask & (1 << i))
              vrs->core.r[i] = *(ptr++);
          }
        /* Writeback the stack pointer value if it wasn't restored.  */
        if ((mask & (1 << R_SP)) == 0)
          vrs->core.r[R_SP] = (_uw) ptr;

        return _UVRSR_OK;
      }

    case _UVRSC_VFP:
      {
        _uw start = discriminator >> 16;
        _uw count = discriminator & 0xffff;
        struct vfp_regs tmp;
        _uw *sp;
        _uw *dest;

        if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE)
            || start + count > 16)
          return _UVRSR_FAILED;

        if (vrs->demand_save_flags & DEMAND_SAVE_VFP)
          {
            /* Demand-save registers for stage1.  */
            vrs->demand_save_flags &= ~DEMAND_SAVE_VFP;
            __gnu_Unwind_Save_VFP (&vrs->vfp);
          }

        /* Restore the registers from the stack.  Do this by saving the
           current VFP registers to a memory area, moving the in-memory
           values into that area, and restoring from the whole area.
           For _UVRSD_VFPX we assume FSTMX standard format 1.  */
        __gnu_Unwind_Save_VFP (&tmp);

        /* The stack address is only guaranteed to be word aligned, so
           we can't use doubleword copies.  */
        sp = (_uw *) vrs->core.r[R_SP];
        dest = (_uw *) &tmp.d[start];
        count *= 2;
        while (count--)
          *(dest++) = *(sp++);

        /* Skip the pad word.  */
        if (representation == _UVRSD_VFPX)
          sp++;

        /* Set the new stack pointer.  */
        vrs->core.r[R_SP] = (_uw) sp;

        /* Reload the registers.  */
        __gnu_Unwind_Restore_VFP (&tmp);

        return _UVRSR_OK;
      }

    case _UVRSC_FPA:
    case _UVRSC_WMMXD:
    case _UVRSC_WMMXC:
      return _UVRSR_NOT_IMPLEMENTED;

    default:
      return _UVRSR_FAILED;
    }
}
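
/* Encoding of the discriminator argument above: for _UVRSC_CORE the low
   16 bits are a register bitmask (e.g. 0x8010 pops r4 and pc); for
   _UVRSC_VFP the upper 16 bits give the first D register and the lower
   16 bits the register count (e.g. 0x00080008 pops d8-d15).  */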

/* Core unwinding functions.  */

/* Calculate the address encoded by a 31-bit self-relative offset at address
   P.  */
static inline _uw
selfrel_offset31 (const _uw *p)
{
  _uw offset;

  offset = *p;
  /* Sign extend to 32 bits.  */
  if (offset & (1 << 30))
    offset |= 1u << 31;

  return offset + (_uw) p;
}
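
/* Worked example (illustrative): if the word at address 0x8000 holds
   0x7ffffff8, bit 30 is set, so the value sign-extends to 0xfffffff8,
   i.e. -8, and the encoded address is 0x8000 - 8 = 0x7ff8.  An offset
   with bit 30 clear is used unchanged, giving a forward reference.  */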

/* Perform a binary search for RETURN_ADDRESS in TABLE.  The table contains
   NREC entries.  */

static const __EIT_entry *
search_EIT_table (const __EIT_entry * table, int nrec, _uw return_address)
{
  _uw next_fn;
  _uw this_fn;
  int n, left, right;

  if (nrec == 0)
    return (__EIT_entry *) 0;

  left = 0;
  right = nrec - 1;

  while (1)
    {
      n = (left + right) / 2;
      this_fn = selfrel_offset31 (&table[n].fnoffset);
      if (n != nrec - 1)
        next_fn = selfrel_offset31 (&table[n + 1].fnoffset);
      else
        next_fn = ~(_uw) 0;

      if (return_address < this_fn)
        {
          if (n == left)
            return (__EIT_entry *) 0;
          right = n - 1;
        }
      else if (return_address < next_fn)
        return &table[n];
      else
        left = n + 1;
    }
}

/* Find the exception index table entry for the given address.
   Fill in the relevant fields of the UCB.
   Returns _URC_FAILURE if an error occurred, _URC_OK on success.  */

static _Unwind_Reason_Code
get_eit_entry (_Unwind_Control_Block *ucbp, _uw return_address)
{
  const __EIT_entry * eitp;
  int nrec;

  /* The return address is the address of the instruction following the
     call instruction (plus one in thumb mode).  If this was the last
     instruction in the function the address will lie in the following
     function.  Subtract 2 from the address so that it points within the call
     instruction itself.  */
  return_address -= 2;

  if (__gnu_Unwind_Find_exidx)
    {
      eitp = (const __EIT_entry *) __gnu_Unwind_Find_exidx (return_address,
                                                            &nrec);
      if (!eitp)
        {
          UCB_PR_ADDR (ucbp) = 0;
          return _URC_FAILURE;
        }
    }
  else
    {
      eitp = &__exidx_start;
      nrec = &__exidx_end - &__exidx_start;
    }

  eitp = search_EIT_table (eitp, nrec, return_address);

  if (!eitp)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_FAILURE;
    }
  ucbp->pr_cache.fnstart = selfrel_offset31 (&eitp->fnoffset);

  /* Can this frame be unwound at all?  */
  if (eitp->content == EXIDX_CANTUNWIND)
    {
      UCB_PR_ADDR (ucbp) = 0;
      return _URC_FAILURE;
    }

  /* Obtain the address of the "real" __EHT_Header word.  */

  if (eitp->content & uint32_highbit)
    {
      /* It is immediate data.  */
      ucbp->pr_cache.ehtp = (_Unwind_EHT_Header *)&eitp->content;
      ucbp->pr_cache.additional = 1;
    }
  else
    {
      /* The low 31 bits of the content field are a self-relative
         offset to an _Unwind_EHT_Entry structure.  */
      ucbp->pr_cache.ehtp =
        (_Unwind_EHT_Header *) selfrel_offset31 (&eitp->content);
      ucbp->pr_cache.additional = 0;
    }

  /* Discover the personality routine address.  */
  if (*ucbp->pr_cache.ehtp & (1u << 31))
    {
      /* One of the predefined standard routines.  */
      _uw idx = (*(_uw *) ucbp->pr_cache.ehtp >> 24) & 0xf;
      if (idx == 0)
        UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr0;
      else if (idx == 1)
        UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr1;
      else if (idx == 2)
        UCB_PR_ADDR (ucbp) = (_uw) &__aeabi_unwind_cpp_pr2;
      else
        { /* Failed */
          UCB_PR_ADDR (ucbp) = 0;
          return _URC_FAILURE;
        }
    }
  else
    {
      /* Execute region offset to PR.  */
      UCB_PR_ADDR (ucbp) = selfrel_offset31 (ucbp->pr_cache.ehtp);
    }
  return _URC_OK;
}

/* Perform phase2 unwinding.  VRS is the initial virtual register state.  */

static void __attribute__((noreturn))
unwind_phase2 (_Unwind_Control_Block * ucbp, phase2_vrs * vrs)
{
  _Unwind_Reason_Code pr_result;

  for(;;)
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, vrs->core.r[R_PC]) != _URC_OK)
        abort ();

      UCB_SAVED_CALLSITE_ADDR (ucbp) = vrs->core.r[R_PC];

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
        (_US_UNWIND_FRAME_STARTING, ucbp, (_Unwind_Context *) vrs);

      if (pr_result != _URC_CONTINUE_UNWIND)
        break;
    }

  if (pr_result != _URC_INSTALL_CONTEXT)
    abort();

  restore_core_regs (&vrs->core);
}

/* Perform phase1 unwinding.  UCBP is the exception being thrown, and
   entry_VRS is the register state on entry to _Unwind_RaiseException.  */

_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_RaiseException (_Unwind_Control_Block * ucbp,
                             phase2_vrs * entry_vrs)
{
  phase1_vrs saved_vrs;
  _Unwind_Reason_Code pr_result;

  /* Set the pc to the call site.  */
  entry_vrs->core.r[R_PC] = entry_vrs->core.r[R_LR];

  /* Save the core registers.  */
  saved_vrs.core = entry_vrs->core;
  /* Set demand-save flags.  */
  saved_vrs.demand_save_flags = ~(_uw) 0;

  /* Unwind until we reach a propagation barrier.  */
  for (;;)
    {
      /* Find the entry for this routine.  */
      if (get_eit_entry (ucbp, saved_vrs.core.r[R_PC]) != _URC_OK)
        return _URC_FAILURE;

      /* Call the pr to decide what to do.  */
      pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
        (_US_VIRTUAL_UNWIND_FRAME, ucbp, (void *) &saved_vrs);

      if (pr_result != _URC_CONTINUE_UNWIND)
        break;
    }

  /* We've unwound as far as we want to go, so restore the original
     register state.  */
  restore_non_core_regs (&saved_vrs);
  if (pr_result != _URC_HANDLER_FOUND)
    {
      /* Some sort of failure has occurred in the pr and probably the
         pr returned _URC_FAILURE.  */
      return _URC_FAILURE;
    }

  unwind_phase2 (ucbp, entry_vrs);
}
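
/* Note: the exported _Unwind_RaiseException and _Unwind_Resume entry points
   are expected to be the assembly wrappers mentioned above, which capture
   the caller's register state into a phase2_vrs before transferring to the
   __gnu_Unwind_* implementations in this file.  */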

/* Resume unwinding after a cleanup has been run.  UCBP is the exception
   being thrown and ENTRY_VRS is the register state on entry to
   _Unwind_Resume.  */
_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block *, phase2_vrs *);

_Unwind_Reason_Code
__gnu_Unwind_Resume (_Unwind_Control_Block * ucbp, phase2_vrs * entry_vrs)
{
  _Unwind_Reason_Code pr_result;

  /* Recover the saved address.  */
  entry_vrs->core.r[R_PC] = UCB_SAVED_CALLSITE_ADDR (ucbp);

  /* Call the cached PR.  */
  pr_result = ((personality_routine) UCB_PR_ADDR (ucbp))
    (_US_UNWIND_FRAME_RESUME, ucbp, (_Unwind_Context *) entry_vrs);

  switch (pr_result)
    {
    case _URC_INSTALL_CONTEXT:
      /* Upload the registers to enter the landing pad.  */
      restore_core_regs (&entry_vrs->core);

    case _URC_CONTINUE_UNWIND:
      /* Continue unwinding the next frame.  */
      unwind_phase2 (ucbp, entry_vrs);

    default:
      abort ();
    }
}

/* Clean up an exception object when unwinding is complete.  */
void
_Unwind_Complete (_Unwind_Control_Block * ucbp __attribute__((unused)))
{
}

/* Get the _Unwind_Control_Block from an _Unwind_Context.  */
static inline _Unwind_Control_Block *
unwind_UCB_from_context (_Unwind_Context * context)
{
  return (_Unwind_Control_Block *) _Unwind_GetGR (context, R_IP);
}

/* Free an exception.  */

void
_Unwind_DeleteException (_Unwind_Exception * exc)
{
  if (exc->exception_cleanup)
    (*exc->exception_cleanup) (_URC_FOREIGN_EXCEPTION_CAUGHT, exc);
}

/* Common implementation for ARM ABI defined personality routines.
   ID is the index of the personality routine, other arguments are as defined
   by __aeabi_unwind_cpp_pr{0,1,2}.  */

static _Unwind_Reason_Code
__gnu_unwind_pr_common (_Unwind_State state,
                        _Unwind_Control_Block *ucbp,
                        _Unwind_Context *context,
                        int id)
{
  __gnu_unwind_state uws;
  _uw *data;
  _uw offset;
  _uw len;
  _uw rtti_count;
  int phase2_call_unexpected_after_unwind = 0;
  int in_range = 0;

  data = (_uw *) ucbp->pr_cache.ehtp;
  uws.data = *(data++);
  uws.next = data;
  if (id == 0)
    {
      uws.data <<= 8;
      uws.words_left = 0;
      uws.bytes_left = 3;
    }
  else
    {
      uws.words_left = (uws.data >> 16) & 0xff;
      uws.data <<= 16;
      uws.bytes_left = 2;
      data += uws.words_left;
    }
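
  /* At this point uws describes the unwind opcodes for this frame: for
     __aeabi_unwind_cpp_pr0 three opcode bytes follow the leading byte of
     the first word, while for pr1/pr2 bits 16-23 of the first word give
     the number of additional opcode words and two opcode bytes remain in
     the first word.  DATA now points past the opcode words, at any
     descriptor list that follows.  */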

  /* Restore the saved pointer.  */
  if (state == _US_UNWIND_FRAME_RESUME)
    data = (_uw *) ucbp->cleanup_cache.bitpattern[0];

  if ((ucbp->pr_cache.additional & 1) == 0)
    {
      /* Process descriptors.  */
      while (*data)
        {
          _uw addr;
          _uw fnstart;

          if (id == 2)
            {
              len = ((EHT32 *) data)->length;
              offset = ((EHT32 *) data)->offset;
              data += 2;
            }
          else
            {
              len = ((EHT16 *) data)->length;
              offset = ((EHT16 *) data)->offset;
              data++;
            }

          fnstart = ucbp->pr_cache.fnstart + (offset & ~1);
          addr = _Unwind_GetGR (context, R_PC);
          in_range = (fnstart <= addr && addr < fnstart + (len & ~1));
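
          /* The low bit of the offset and the low bit of the length select
             the descriptor kind handled by the switch below: 0 is a cleanup,
             1 a catch handler and 2 an exception specification (3 is
             unused).  The remaining bits give the code range covered by the
             descriptor, tested above via in_range.  */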

          switch (((offset & 1) << 1) | (len & 1))
            {
            case 0:
              /* Cleanup.  */
              if (state != _US_VIRTUAL_UNWIND_FRAME
                  && in_range)
                {
                  /* Cleanup in range, and we are running cleanups.  */
                  _uw lp;

                  /* Landing pad address is 31-bit pc-relative offset.  */
                  lp = selfrel_offset31 (data);
                  data++;
                  /* Save the exception data pointer.  */
                  ucbp->cleanup_cache.bitpattern[0] = (_uw) data;
                  if (!__cxa_begin_cleanup (ucbp))
                    return _URC_FAILURE;
                  /* Setup the VRS to enter the landing pad.  */
                  _Unwind_SetGR (context, R_PC, lp);
                  return _URC_INSTALL_CONTEXT;
                }
              /* Cleanup not in range, or we are in stage 1.  */
              data++;
              break;

            case 1:
              /* Catch handler.  */
              if (state == _US_VIRTUAL_UNWIND_FRAME)
                {
                  if (in_range)
                    {
                      /* Check for a barrier.  */
                      _uw rtti;
                      void *matched;

                      /* Check for no-throw areas.  */
                      if (data[1] == (_uw) -2)
                        return _URC_FAILURE;

                      /* The thrown object immediately follows the ECB.  */
                      matched = (void *)(ucbp + 1);
                      if (data[1] != (_uw) -1)
                        {
                          /* Match a catch specification.  */
                          rtti = _Unwind_decode_target2 ((_uw) &data[1]);
                          if (!__cxa_type_match (ucbp, (type_info *) rtti,
                                                 &matched))
                            matched = (void *)0;
                        }

                      if (matched)
                        {
                          ucbp->barrier_cache.sp =
                            _Unwind_GetGR (context, R_SP);
                          ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
                          ucbp->barrier_cache.bitpattern[1] = (_uw) data;
                          return _URC_HANDLER_FOUND;
                        }
                    }
                  /* Handler out of range, or not matched.  */
                }
              else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
                       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
                {
                  /* Matched a previous propagation barrier.  */
                  _uw lp;

                  /* Setup for entry to the handler.  */
                  lp = selfrel_offset31 (data);
                  _Unwind_SetGR (context, R_PC, lp);
                  _Unwind_SetGR (context, 0, (_uw) ucbp);
                  return _URC_INSTALL_CONTEXT;
                }
              /* Catch handler not matched.  Advance to the next descriptor.  */
              data += 2;
              break;

            case 2:
              rtti_count = data[0] & 0x7fffffff;
              /* Exception specification.  */
              if (state == _US_VIRTUAL_UNWIND_FRAME)
                {
                  if (in_range)
                    {
                      /* Match against the exception specification.  */
                      _uw i;
                      _uw rtti;
                      void *matched;

                      for (i = 0; i < rtti_count; i++)
                        {
                          matched = (void *)(ucbp + 1);
                          rtti = _Unwind_decode_target2 ((_uw) &data[i + 1]);
                          if (__cxa_type_match (ucbp, (type_info *) rtti,
                                                &matched))
                            break;
                        }

                      if (i == rtti_count)
                        {
                          /* Exception does not match the spec.  */
                          ucbp->barrier_cache.sp =
                            _Unwind_GetGR (context, R_SP);
                          ucbp->barrier_cache.bitpattern[0] = (_uw) matched;
                          ucbp->barrier_cache.bitpattern[1] = (_uw) data;
                          return _URC_HANDLER_FOUND;
                        }
                    }
                  /* Handler out of range, or exception is permitted.  */
                }
              else if (ucbp->barrier_cache.sp == _Unwind_GetGR (context, R_SP)
                       && ucbp->barrier_cache.bitpattern[1] == (_uw) data)
                {
                  /* Matched a previous propagation barrier.  */
                  _uw lp;
                  /* Record the RTTI list for __cxa_call_unexpected.  */
                  ucbp->barrier_cache.bitpattern[1] = rtti_count;
                  ucbp->barrier_cache.bitpattern[2] = 0;
                  ucbp->barrier_cache.bitpattern[3] = 4;
                  ucbp->barrier_cache.bitpattern[4] = (_uw) &data[1];

                  if (data[0] & uint32_highbit)
                    phase2_call_unexpected_after_unwind = 1;
                  else
                    {
                      data += rtti_count + 1;
                      /* Setup for entry to the handler.  */
                      lp = selfrel_offset31 (data);
                      data++;
                      _Unwind_SetGR (context, R_PC, lp);
                      _Unwind_SetGR (context, 0, (_uw) ucbp);
                      return _URC_INSTALL_CONTEXT;
                    }
                }
              if (data[0] & uint32_highbit)
                data++;
              data += rtti_count + 1;
              break;

            default:
              /* Should never happen.  */
              return _URC_FAILURE;
            }
          /* Finished processing this descriptor.  */
        }
    }

  if (__gnu_unwind_execute (context, &uws) != _URC_OK)
    return _URC_FAILURE;

  if (phase2_call_unexpected_after_unwind)
    {
      /* Enter __cxa_unexpected as if called from the call site.  */
      _Unwind_SetGR (context, R_LR, _Unwind_GetGR (context, R_PC));
      _Unwind_SetGR (context, R_PC, (_uw) &__cxa_call_unexpected);
      return _URC_INSTALL_CONTEXT;
    }

  return _URC_CONTINUE_UNWIND;
}

/* ABI defined personality routine entry points.  */

_Unwind_Reason_Code
__aeabi_unwind_cpp_pr0 (_Unwind_State state,
                        _Unwind_Control_Block *ucbp,
                        _Unwind_Context *context)
{
  return __gnu_unwind_pr_common (state, ucbp, context, 0);
}

_Unwind_Reason_Code
__aeabi_unwind_cpp_pr1 (_Unwind_State state,
                        _Unwind_Control_Block *ucbp,
                        _Unwind_Context *context)
{
  return __gnu_unwind_pr_common (state, ucbp, context, 1);
}

_Unwind_Reason_Code
__aeabi_unwind_cpp_pr2 (_Unwind_State state,
                        _Unwind_Control_Block *ucbp,
                        _Unwind_Context *context)
{
  return __gnu_unwind_pr_common (state, ucbp, context, 2);
}