@ libgcc routines for ARM cpu.
@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
   Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* ------------------------------------------------------------------------ */

/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error  __USER_LABEL_PREFIX__ not defined
#endif
/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
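/* For example, with __USER_LABEL_PREFIX__ defined as _, SYM (udivsi3)
   expands to _udivsi3; with an empty prefix it is simply udivsi3.  */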
#define __PLT__  /* Not supported in Thumb assembler (for now).  */
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
/* Function end macros.  Variants for interworking.  */
@ This selects the minimum architecture level required.
#define __ARM_ARCH__ 3
#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
	|| defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# undef __ARM_ARCH__
# define __ARM_ARCH__ 4
#endif
#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
	|| defined(__ARM_ARCH_5TEJ__)
# undef __ARM_ARCH__
# define __ARM_ARCH__ 5
#endif
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
	|| defined(__ARM_ARCH_6ZK__)
# undef __ARM_ARCH__
# define __ARM_ARCH__ 6
#endif
/* How to return from a function call depends on the architecture variant.  */

#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

# define RET		bx	lr
# define RETc(x)	bx##x	lr
/* Special precautions for interworking on armv4t.  */
# if (__ARM_ARCH__ == 4)

/* Always use bx, not ldr pc.  */
#  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
#   define __INTERWORKING__
#  endif /* __THUMB__ || __THUMB_INTERWORK__ */

/* Include thumb stub before arm mode code.  */
#  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#   define __INTERWORKING_STUBS__
#  endif /* __thumb__ && !__THUMB_INTERWORK__ */

# endif /* __ARM_ARCH == 4 */
#else

# define RET		mov	pc, lr
# define RETc(x)	mov##x	pc, lr

#endif
.macro cfi_pop		advance, reg, cfa_offset
#ifdef __ELF__
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0xc0 | \reg)	/* DW_CFA_restore */
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
#endif
.endm
.macro cfi_push		advance, reg, offset, cfa_offset
#ifdef __ELF__
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0x80 | \reg)	/* DW_CFA_offset */
	.uleb128 (\offset / -4)
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
#endif
.endm
.macro cfi_start	start_label, end_label
#ifdef __ELF__
	.pushsection	.debug_frame
LSYM(Lstart_frame):
	.4byte	LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
LSYM(Lstart_cie):
	.4byte	0xffffffff	@ CIE Identifier Tag
	.byte	0x1	@ CIE Version
	.ascii	"\0"	@ CIE Augmentation
	.uleb128 0x1	@ CIE Code Alignment Factor
	.sleb128 -4	@ CIE Data Alignment Factor
	.byte	0xe	@ CIE RA Column
	.byte	0xc	@ DW_CFA_def_cfa
	.uleb128 0xd
	.uleb128 0x0

	.align 2
LSYM(Lend_cie):
	.4byte	LSYM(Lend_fde)-LSYM(Lstart_fde)	@ FDE Length
LSYM(Lstart_fde):
	.4byte	LSYM(Lstart_frame)	@ FDE CIE offset
	.4byte	\start_label	@ FDE initial location
	.4byte	\end_label-\start_label	@ FDE address range
	.popsection
#endif
.endm
.macro cfi_end	end_label
#ifdef __ELF__
	.pushsection	.debug_frame
	.align	2
LSYM(Lend_fde):
	.4byte	\end_label
	.popsection
#endif
.endm
/* Don't pass dirn, it's there just to get token pasting right.  */

.macro	RETLDM	regs=, cond=, unwind=, dirn=ia
#if defined (__INTERWORKING__)
	.ifc "\regs",""
	ldr\cond	lr, [sp], #8
	.else
	ldm\cond\dirn	sp!, {\regs, lr}
	.endif
	.ifnc "\unwind", ""
	/* Mark LR as restored.  */
97:	cfi_pop 97b - \unwind, 0xe, 0x0
	.endif
	bx\cond	lr
#else
	.ifc "\regs",""
	ldr\cond	pc, [sp], #8
	.else
	ldm\cond\dirn	sp!, {\regs, pc}
	.endif
#endif
.endm
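/* For instance, with no condition and the default dirn, "RETLDM r4" is
   intended to assemble to "ldmia sp!, {r4, pc}", or to the pair
   "ldmia sp!, {r4, lr}" / "bx lr" when interworking.  */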
.macro ARM_LDIV0 name
	str	lr, [sp, #-8]!
98:	cfi_push 98b - __\name, 0xe, -0x8, 0x8
	bl	SYM (__div0) __PLT__
	mov	r0, #0			@ About as wrong as it could be.
	RETLDM	unwind=98b
.endm
.macro THUMB_LDIV0 name
	push	{ lr }
98:	cfi_push 98b - __\name, 0xe, -0x4, 0x8
	bl	SYM (__div0)
	mov	r0, #0			@ About as wrong as it could be.
#if defined (__INTERWORKING__)
	pop	{ r1 }
	bx	r1
#else
	pop	{ pc }
#endif
.endm
.macro DIV_FUNC_END name
	cfi_start	__\name, LSYM(Lend_div0)
LSYM(Ldiv0):
#ifdef __thumb__
	THUMB_LDIV0 \name
#else
	ARM_LDIV0 \name
#endif
	cfi_end	LSYM(Lend_div0)
	FUNC_END \name
.endm
.macro THUMB_FUNC_START name
	.globl	SYM (\name)
	TYPE	(\name)
	.thumb_func
SYM (\name):
.endm
/* Function start macros.  Variants for ARM and Thumb.  */

#ifdef __thumb__
#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
#else
#define THUMB_FUNC
#define THUMB_CODE
#endif
.macro FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	THUMB_CODE
	THUMB_FUNC
SYM (__\name):
.endm
/* Special function that will always be coded in ARM assembly, even if
   in Thumb-only compilation.  */
#if defined(__INTERWORKING_STUBS__)
.macro	ARM_FUNC_START name
	FUNC_START \name
	bx	pc
	nop
	.arm
/* A hook to tell gdb that we've switched to ARM mode.  Also used to call
   directly from other local arm routines.  */
_L__\name:
.endm
#define EQUIV .thumb_set
/* Branch directly to a function declared with ARM_FUNC_START.
   Must be called in arm mode.  */
.macro  ARM_CALL name
	bl	_L__\name
.endm
#else
.macro	ARM_FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	.arm
SYM (__\name):
.endm
#define EQUIV .set
.macro  ARM_CALL name
	bl	__\name
.endm
#endif
.macro	FUNC_ALIAS new old
	.globl	SYM (__\new)
#if defined (__thumb__)
	.thumb_set	SYM (__\new), SYM (__\old)
#else
	.set	SYM (__\new), SYM (__\old)
#endif
.endm
.macro	ARM_FUNC_ALIAS new old
	.globl	SYM (__\new)
	EQUIV	SYM (__\new), SYM (__\old)
#if defined(__INTERWORKING_STUBS__)
	.set	SYM (_L__\new), SYM (_L__\old)
#endif
.endm
/* Register aliases.  */

work		.req	r4	@ XXXX is this safe ?
dividend	.req	r0
divisor		.req	r1
overdone	.req	r2
result		.req	r2
curbit		.req	r3
/* ------------------------------------------------------------------------ */
/*	       Bodies of the division and modulo routines.		      */
/* ------------------------------------------------------------------------ */
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
	clz	\curbit, \dividend
	clz	\result, \divisor
	sub	\curbit, \result, \curbit
	rsbs	\curbit, \curbit, #31
	addne	\curbit, \curbit, \curbit, lsl #1
	mov	\result, #0
	addne	pc, pc, \curbit, lsl #2
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	adc	\result, \result, \result
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr
#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5
	clz	\curbit, \divisor
	clz	\result, \dividend
	sub	\result, \curbit, \result
	mov	\curbit, #1
	mov	\divisor, \divisor, lsl \result
	mov	\curbit, \curbit, lsl \result
	mov	\result, #0
#else /* __ARM_ARCH__ < 5 */

	@ Initially shift the divisor left 3 bits if possible,
	@ set curbit accordingly.  This allows for curbit to be located
	@ at the left end of each 4-bit nibble in the division loop
	@ to save one loop in most cases.
	tst	\divisor, #0xe0000000
	moveq	\divisor, \divisor, lsl #3
	moveq	\curbit, #8
	movne	\curbit, #1
	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	movlo	\curbit, \curbit, lsl #4
	blo	1b
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	movlo	\curbit, \curbit, lsl #1
	blo	1b

	mov	\result, #0
#endif /* __ARM_ARCH__ < 5 */
	@ Division loop
1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	orrhs	\result,   \result,   \curbit
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	orrhs	\result,   \result,   \curbit, lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	orrhs	\result,   \result,   \curbit, lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	orrhs	\result,   \result,   \curbit, lsr #3
	cmp	\dividend, #0			@ Early termination?
	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
	movne	\divisor,  \divisor, lsr #4
	bne	1b
#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
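/* For reference, the generic path above is a restoring shift-and-subtract
   division.  A rough C equivalent (ours, illustrative only; not part of
   the original source):

	unsigned udiv (unsigned dividend, unsigned divisor)
	{
	  unsigned curbit = 1, result = 0;
	  while (divisor < dividend && !(divisor & 0x80000000))
	    {
	      divisor <<= 1;
	      curbit <<= 1;
	    }
	  while (curbit)
	    {
	      if (dividend >= divisor)
		{
		  dividend -= divisor;
		  result |= curbit;
		}
	      divisor >>= 1;
	      curbit >>= 1;
	    }
	  return result;	// dividend now holds the remainder
	}
*/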
/* ------------------------------------------------------------------------ */

.macro ARM_DIV2_ORDER divisor, order

#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	rsb	\order, \order, #31

#else
	cmp	\divisor, #(1 << 16)
	movhs	\divisor, \divisor, lsr #16
	movhs	\order, #16
	movlo	\order, #0

	cmp	\divisor, #(1 << 8)
	movhs	\divisor, \divisor, lsr #8
	addhs	\order, \order, #8

	cmp	\divisor, #(1 << 4)
	movhs	\divisor, \divisor, lsr #4
	addhs	\order, \order, #4

	cmp	\divisor, #(1 << 2)
	addhi	\order, \order, #3
	addls	\order, \order, \divisor, lsr #1

#endif

.endm
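/* In C terms (our illustrative sketch, assuming the divisor is a power of
   two, which is how the callers use this macro):

	unsigned order = 0;
	if (divisor >= 1u << 16) { divisor >>= 16; order += 16; }
	if (divisor >= 1u << 8)  { divisor >>= 8;  order += 8;  }
	if (divisor >= 1u << 4)  { divisor >>= 4;  order += 4;  }
	if (divisor >  1u << 2)  order += 3;
	else                     order += divisor >> 1;
*/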
/* ------------------------------------------------------------------------ */

.macro ARM_MOD_BODY dividend, divisor, order, spare

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	rsbs	\order, \order, #31
	addne	pc, pc, \order, lsl #3
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr
#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5
	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	mov	\divisor, \divisor, lsl \order
#else /* __ARM_ARCH__ < 5 */

	mov	\order, #0

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	addlo	\order, \order, #4
	blo	1b
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	addlo	\order, \order, #1
	blo	1b
#endif /* __ARM_ARCH__ < 5 */
	@ Perform all needed subtractions to keep only the remainder.
	@ Do comparisons in batches of 4 first.
	subs	\order, \order, #3		@ yes, 3 is intended here
	blt	2f
1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	cmp	\dividend, #1
	mov	\divisor, \divisor, lsr #4
	subges	\order, \order, #4
	bge	1b

	tst	\order, #3
	teqne	\dividend, #0
	beq	5f

	@ Either 1, 2 or 3 comparisons/subtractions are left.
2:	cmn	\order, #2
	blt	4f
	beq	3f
	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
3:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
4:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
5:
#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
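/* A rough C equivalent of the generic path (ours, illustrative only):

	unsigned umod (unsigned dividend, unsigned divisor)
	{
	  unsigned d = divisor;
	  int order = 0;
	  while (d < dividend && !(d & 0x80000000))
	    {
	      d <<= 1;
	      order++;
	    }
	  for (; order >= 0; order--, d >>= 1)
	    if (dividend >= d)
	      dividend -= d;
	  return dividend;
	}
*/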
/* ------------------------------------------------------------------------ */

.macro THUMB_DIV_MOD_BODY modulo
	@ Load the constant 0x10000000 into our work register.
	mov	work, #1
	lsl	work, #28
LSYM(Lloop1):
	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
	cmp	divisor, dividend
	@ Set work to 0x80000000
	lsl	work, #3
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
	cmp	divisor, dividend
	@ Test for possible subtractions ...
  .if \modulo
	@ ... On the final pass, this may subtract too much from the dividend,
	@ so keep track of which subtractions are done, we can fix them up
	@ afterwards.
	mov	overdone, #0
	cmp	dividend, divisor
	sub	dividend, dividend, divisor
	lsr	work, divisor, #1
	sub	dividend, dividend, work
	lsr	work, divisor, #2
	sub	dividend, dividend, work
	lsr	work, divisor, #3
	sub	dividend, dividend, work
  .else
	@ ... and note which bits are done in the result.  On the final pass,
	@ this may subtract too much from the dividend, but the result will be ok,
	@ since the "bit" will have been shifted out at the bottom.
	cmp	dividend, divisor
	sub	dividend, dividend, divisor
	orr	result, result, curbit
	lsr	work, divisor, #1
	sub	dividend, dividend, work
	lsr	work, divisor, #2
	sub	dividend, dividend, work
	lsr	work, divisor, #3
	sub	dividend, dividend, work
  .endif
	cmp	dividend, #0			@ Early termination?
	lsr	curbit, #4			@ No, any more bits to do?
	@ Any subtractions that we should not have done will be recorded in
	@ the top three bits of "overdone".  Exactly which were not needed
	@ are governed by the position of the bit, stored in ip.
	beq	LSYM(Lgot_result)
	@ If we terminated early, because dividend became zero, then the
	@ bit in ip will not be in the bottom nibble, and we should not
	@ perform the additions below.  We must test for this though
	@ (rather than relying upon the TSTs to prevent the additions) since
	@ the bit in ip could be in the top two bits which might then match
	@ with one of the smaller RORs.
	beq	LSYM(Lgot_result)
	lsr	work, divisor, #3
	lsr	work, divisor, #2
	beq	LSYM(Lgot_result)
	lsr	work, divisor, #1
/* ------------------------------------------------------------------------ */
/*		Start of the Real Functions				      */
/* ------------------------------------------------------------------------ */
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)
#else /* ARM version.  */

	ARM_DIV_BODY r0, r1, r2, r3

12:	ARM_DIV2_ORDER r1, r2

#endif /* ARM version */
FUNC_START aeabi_uidivmod

	stmfd	sp!, { r0, r1, lr }
	bl	SYM(__aeabi_uidiv)
	ldmfd	sp!, { r1, r2, lr }
	mul	r3, r2, r0
	sub	r1, r1, r3
	RET

	FUNC_END aeabi_uidivmod
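/* In C terms (sketch, ours): __aeabi_uidivmod returns the quotient in r0
   and the remainder in r1, computed as

	r0 = dividend / divisor;
	r1 = dividend - r0 * divisor;
*/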
#endif /* L_udivsi3 */
/* ------------------------------------------------------------------------ */
	cmp	dividend, divisor

#else /* ARM version.  */
	subs	r2, r1, #1			@ compare divisor with 1
	cmpne	r0, r1				@ compare dividend with divisor
	tsthi	r1, r2				@ see if divisor is power of 2
	ARM_MOD_BODY r0, r1, r2, r3

#endif /* ARM version.  */

#endif /* L_umodsi3 */
/* ------------------------------------------------------------------------ */
	eor	work, divisor		@ Save the sign of the result.
	neg	divisor, divisor	@ Loops below use unsigned.
	neg	dividend, dividend
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)
#else /* ARM version.  */
	eor	ip, r0, r1			@ save the sign of the result.
	rsbmi	r1, r1, #0			@ loops below use unsigned.
	subs	r2, r1, #1			@ division by 1 or -1 ?
	rsbmi	r3, r0, #0			@ positive dividend value
	tst	r1, r2				@ divisor is power of 2 ?

	ARM_DIV_BODY r3, r1, r0, r2

10:	teq	ip, r0				@ same sign ?
	moveq	r0, ip, asr #31

12:	ARM_DIV2_ORDER r1, r2

#endif /* ARM version */
FUNC_START aeabi_idivmod

	stmfd	sp!, { r0, r1, lr }
	bl	SYM(__aeabi_idiv)
	ldmfd	sp!, { r1, r2, lr }
	mul	r3, r2, r0
	sub	r1, r1, r3
	RET

	FUNC_END aeabi_idivmod
#endif /* L_divsi3 */
/* ------------------------------------------------------------------------ */
	neg	divisor, divisor	@ Loops below use unsigned.

	@ Need to save the sign of the dividend; unfortunately, we need
	@ the work register later on.  Must do this after saving the original
	@ value of the work register, because we will pop this value off first.

	neg	dividend, dividend
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)
	neg	dividend, dividend

#else /* ARM version.  */
	rsbmi	r1, r1, #0			@ loops below use unsigned.
	movs	ip, r0				@ preserve sign of dividend
	rsbmi	r0, r0, #0			@ if negative make positive
	subs	r2, r1, #1			@ compare divisor with 1
	cmpne	r0, r1				@ compare dividend with divisor
	tsthi	r1, r2				@ see if divisor is power of 2
	ARM_MOD_BODY r0, r1, r2, r3

#endif /* ARM version */

#endif /* L_modsi3 */
/* ------------------------------------------------------------------------ */
	FUNC_ALIAS aeabi_idiv0 div0
	FUNC_ALIAS aeabi_ldiv0 div0

#endif /* L_divmodsi_tools */
/* ------------------------------------------------------------------------ */
@ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls.
/* Constants taken from <asm/unistd.h> and <asm/signal.h> */
#define __NR_SYSCALL_BASE	0x900000
#define __NR_getpid		(__NR_SYSCALL_BASE + 20)
#define __NR_kill		(__NR_SYSCALL_BASE + 37)
#define __NR_gettid		(__NR_SYSCALL_BASE + 224)
#define __NR_tkill		(__NR_SYSCALL_BASE + 238)
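/* In effect the handler raises SIGFPE on the current thread, roughly
   (C sketch, ours; the exact syscall choice depends on what the running
   kernel supports):

	pid = gettid ();		// or getpid () as a fallback
	tkill (pid, SIGFPE);		// or kill (pid, SIGFPE)
*/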
#endif /* L_dvmd_lnx */
/* ------------------------------------------------------------------------ */
/* Dword shift operations.  */
/* All the following Dword shift variants rely on the fact that
	shft xxx, Reg
   is in fact done as
	shft xxx, (Reg & 255)
   so for Reg value in (32...63) and (-1...-31) we will get zero (in the
   case of logical shifts) or the sign (for asr).  */
	FUNC_ALIAS aeabi_llsr lshrdi3

	movmi	al, al, lsr r2
	movpl	al, ah, lsr r3
	orrmi	al, al, ah, lsl ip
	FUNC_ALIAS aeabi_lasr ashrdi3

	@ If r2 is negative at this point the following step would OR
	@ the sign bit into all of AL.  That's not what we want...
	movmi	al, al, lsr r2
	movpl	al, ah, asr r3
	orrmi	al, al, ah, lsl ip
	FUNC_ALIAS aeabi_llsl ashldi3

	movmi	ah, ah, lsl r2
	movpl	ah, al, lsl r3
	orrmi	ah, ah, al, lsr ip
/* ------------------------------------------------------------------------ */
/* These next two sections are here despite the fact that they contain Thumb
   assembler because their presence allows interworked code to be linked even
   when the GCC library is this one.  */

/* Do not build the interworking functions when the target architecture does
   not support Thumb instructions.  (This can be a multilib option).  */
#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__ \
	|| defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
	|| __ARM_ARCH__ >= 6

#if defined L_call_via_rX
/* These labels & instructions are used by the Arm/Thumb interworking code.
   The address of the function to be called is loaded into a register and
   then one of these labels is called via a BL instruction.  This puts the
   return address into the link register with the bottom bit set, and the
   code here switches to the correct mode before executing the function.  */
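/* For example (ours, illustrative): from Thumb code, "ldr r3, =func"
   followed by "bl _call_via_r3" calls func whichever instruction set it
   was compiled for, since the stub simply does "bx r3".  */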
.macro call_via register
	THUMB_FUNC_START _call_via_\register

	bx	\register
	nop

	SIZE	(_call_via_\register)
.endm

#endif /* L_call_via_rX */
#if defined L_interwork_call_via_rX
/* These labels & instructions are used by the Arm/Thumb interworking code,
   when the target address is in an unknown instruction set.  The address
   of the function to be called is loaded into a register and then one of
   these labels is called via a BL instruction.  This puts the return
   address into the link register with the bottom bit set, and the code
   here switches to the correct mode before executing the function.
   Unfortunately the target code cannot be relied upon to return via a BX
   instruction, so instead we have to store the return address on the stack
   and allow the called function to return here instead.  Upon return we
   recover the real return address and use a BX to get back to Thumb mode.

   There are three variations of this code.  The first,
   _interwork_call_via_rN(), will push the return address onto the
   stack and pop it in _arm_return().  It should only be used if all
   arguments are passed in registers.

   The second, _interwork_r7_call_via_rN(), instead stores the return
   address at [r7, #-4].  It is the caller's responsibility to ensure
   that this address is valid and contains no useful data.

   The third, _interwork_r11_call_via_rN(), works in the same way but
   uses r11 instead of r7.  It is useful if the caller does not really
   need a frame pointer.  */
LSYM(Lstart_arm_return):
	cfi_start	LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
	cfi_push	0, 0xe, -0x8, 0x8
	nop	@ This nop is for the benefit of debuggers, so that
		@ backtraces will use the correct unwind information.
_arm_return:
	RETLDM	unwind=LSYM(Lstart_arm_return)
	cfi_end	LSYM(Lend_arm_return)
	.globl _arm_return_r7
_arm_return_r7:
	ldr	lr, [r7, #-4]
	bx	lr

	.globl _arm_return_r11
_arm_return_r11:
	ldr	lr, [r11, #-4]
	bx	lr
.macro interwork_with_frame frame, register, name, return
	.code	16

	THUMB_FUNC_START \name

	bx	pc
	nop

	.code	32
	tst	\register, #1
	streq	lr, [\frame, #-4]
	adreq	lr, _arm_return_\frame
	bx	\register

	SIZE	(\name)
.endm
.macro interwork register
	.code	16

	THUMB_FUNC_START _interwork_call_via_\register

	bx	pc
	nop

	.code	32
	.globl LSYM(Lchange_\register)
LSYM(Lchange_\register):
	tst	\register, #1
	streq	lr, [sp, #-8]!
	adreq	lr, _arm_return
	bx	\register

	SIZE	(_interwork_call_via_\register)

	interwork_with_frame r7,  \register, _interwork_r7_call_via_\register
	interwork_with_frame r11, \register, _interwork_r11_call_via_\register
.endm
	/* The LR case has to be handled a little differently...  */
	.code	16

	THUMB_FUNC_START _interwork_call_via_lr

	bx	pc
	nop

	.code	32
	tst	lr, #1
	stmeqdb	r13!, {lr, pc}
	mov	ip, lr
	adreq	lr, _arm_return
	bx	ip

	SIZE	(_interwork_call_via_lr)
#endif /* L_interwork_call_via_rX */

#endif /* Arch supports thumb.  */
#ifndef __symbian__
#include "ieee754-df.S"
#include "ieee754-sf.S"
#endif /* __symbian__ */