@ libgcc routines for ARM cpu.
@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)

/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005, 2007
   Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
/* An executable stack is *not* required for these functions.  */
#if defined(__ELF__) && defined(__linux__)
/* Emit an empty .note.GNU-stack section so the linker does not mark the
   stack executable for objects built from this file.  */
.section .note.GNU-stack,"",%progbits
	.previous
#endif

/* ------------------------------------------------------------------------ */

/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error  __USER_LABEL_PREFIX__ not defined
#endif
/* ANSI concatenation macros.  */

/* Two levels so that macro arguments are expanded before pasting.  */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

#define __PLT__  /* Not supported in Thumb assembler (for now).  */

/* NOTE(review): upstream wraps the following in #ifdef __ELF__ with empty
   fallbacks; that conditional was lost in this damaged copy — confirm.  */
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
/* Function end macros.  Variants for interworking.  */

#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
	|| defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# define __ARM_ARCH__ 4
#endif

#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
	|| defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
#endif

#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
	|| defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
# define __ARM_ARCH__ 6
#endif

#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
	|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__)
# define __ARM_ARCH__ 7
#endif

#ifndef __ARM_ARCH__
#error Unable to determine architecture.
#endif

/* How to return from a function call depends on the architecture variant.  */

#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

# define RET		bx	lr
# define RETc(x)	bx##x	lr

/* Special precautions for interworking on armv4t.  */
# if (__ARM_ARCH__ == 4)

/* Always use bx, not ldr pc.  */
#  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
#    define __INTERWORKING__
#   endif /* __THUMB__ || __THUMB_INTERWORK__ */

/* Include thumb stub before arm mode code.  */
#  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#   define __INTERWORKING_STUBS__
#  endif /* __thumb__ && !__THUMB_INTERWORK__ */

#endif /* __ARM_ARCH == 4 */

#else

# define RET		mov	pc, lr
# define RETc(x)	mov##x	pc, lr

#endif
/* Emit a DWARF .debug_frame record restoring register \reg and resetting the
   CFA offset, at address \advance past the function start.  */
.macro	cfi_pop		advance, reg, cfa_offset
#ifdef __ELF__
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0xc0 | \reg)	/* DW_CFA_restore */
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
#endif
.endm

/* Emit a DWARF .debug_frame record saving register \reg at \offset from the
   CFA, and setting the CFA offset, at address \advance past function start.  */
.macro	cfi_push	advance, reg, offset, cfa_offset
#ifdef __ELF__
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0x80 | \reg)	/* DW_CFA_offset */
	.uleb128 (\offset / -4)
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
#endif
.endm

/* Open a CIE + FDE covering \start_label..\end_label in .debug_frame.
   Closed by cfi_end.  */
.macro cfi_start	start_label, end_label
#ifdef __ELF__
	.pushsection	.debug_frame
LSYM(Lstart_frame):
	.4byte	LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
LSYM(Lstart_cie):
	.4byte	0xffffffff	@ CIE Identifier Tag
	.byte	0x1	@ CIE Version
	.ascii	"\0"	@ CIE Augmentation
	.uleb128 0x1	@ CIE Code Alignment Factor
	.sleb128 -4	@ CIE Data Alignment Factor
	.byte	0xe	@ CIE RA Column
	.byte	0xc	@ DW_CFA_def_cfa
	.uleb128 0xd
	.uleb128 0x0

	.align 2
LSYM(Lend_cie):
	.4byte	LSYM(Lend_fde)-LSYM(Lstart_fde)	@ FDE Length
LSYM(Lstart_fde):
	.4byte	LSYM(Lstart_frame)	@ FDE CIE offset
	.4byte	\start_label	@ FDE initial location
	.4byte	\end_label-\start_label	@ FDE address range
	.popsection
#endif
.endm

/* Terminate the FDE opened by cfi_start and define \end_label.  */
.macro cfi_end	end_label
#ifdef __ELF__
	.pushsection	.debug_frame
	.align	2
LSYM(Lend_fde):
	.popsection
\end_label:
#endif
.endm
/* Don't pass dirn, it's there just to get token pasting right.  */

/* Return from a function, restoring \regs (and lr/pc) from the stack.
   NOTE(review): reconstructed from a damaged copy; the source fragment showed
   "lr" in the final ldm, upstream uses "pc" — verify against upstream.  */
.macro	RETLDM	regs=, cond=, unwind=, dirn=ia
#if defined (__INTERWORKING__)
	.ifc "\regs",""
	ldr\cond	lr, [sp], #8
	.else
# if defined(__thumb2__)
	pop\cond	{\regs, lr}
# else
	ldm\cond\dirn	sp!, {\regs, lr}
# endif
	.endif
	.ifnc "\unwind", ""
	/* Mark LR as restored.  */
97:	cfi_pop 97b - \unwind, 0xe, 0x0
	.endif
	bx\cond	lr
#else
	/* Caller is responsible for providing IT instruction.  */
	.ifc "\regs",""
	ldr\cond	pc, [sp], #8
	.else
# if defined(__thumb2__)
	pop\cond	{\regs, pc}
# else
	ldm\cond\dirn	sp!, {\regs, pc}
# endif
	.endif
#endif
.endm
/* The Unified assembly syntax allows the same code to be assembled for both
   ARM and Thumb-2.  However this is only supported by recent gas, so define
   a set of macros to allow ARM code on older assemblers.  */
#if defined(__thumb2__)
.macro	do_it cond, suffix=""
	it\suffix	\cond
.endm
.macro	shift1 op, arg0, arg1, arg2
	\op	\arg0, \arg1, \arg2
.endm
#define do_push	push
#define do_pop	pop
#define COND(op1, op2, cond) op1 ## op2 ## cond
/* Perform an arithmetic operation with a variable shift operand.  This
   requires two instructions and a scratch register on Thumb-2.  */
.macro	shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
	\shiftop \tmp, \src2, \shiftreg
	\name \dest, \src1, \tmp
.endm
#else
.macro	do_it cond, suffix=""
.endm
.macro	shift1 op, arg0, arg1, arg2
	mov	\arg0, \arg1, \op \arg2
.endm
#define do_push	stmfd sp!,
#define do_pop	ldmfd sp!,
#define COND(op1, op2, cond) op1 ## cond ## op2
.macro	shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
	\name \dest, \src1, \src2, \shiftop \shiftreg
.endm
#endif
/* ARM-mode divide-by-zero tail: call __div0, return 0.  */
.macro ARM_LDIV0 name
	str	lr, [sp, #-8]!
98:	cfi_push 98b - __\name, 0xe, -0x8, 0x8
	bl	SYM (__div0) __PLT__
	mov	r0, #0			@ About as wrong as it could be.
	RETLDM	unwind=98b
.endm

/* Thumb-mode divide-by-zero tail: call __div0, return 0.  */
.macro THUMB_LDIV0 name
	push	{ r1, lr }
98:	cfi_push 98b - __\name, 0xe, -0x4, 0x8
	bl	SYM (__div0)
	mov	r0, #0			@ About as wrong as it could be.
#if defined (__INTERWORKING__)
	pop	{ r1, r2 }
	bx	r2
#else
	pop	{ r1, pc }
#endif
.endm

.macro FUNC_END name
	SIZE (__\name)
.endm

/* Shared epilogue for the division routines: emits the Ldiv0 handler
   and closes the unwind info and symbol size.  */
.macro DIV_FUNC_END name
	cfi_start	__\name, LSYM(Lend_div0)
LSYM(Ldiv0):
#ifdef __thumb__
	THUMB_LDIV0 \name
#else
	ARM_LDIV0 \name
#endif
	cfi_end	LSYM(Lend_div0)
	FUNC_END \name
.endm
/* NOTE(review): this region was reconstructed from a damaged copy; verify
   the macro bodies against upstream libgcc.  */

/* Declare a global Thumb function \name (no __ prefix).  */
.macro THUMB_FUNC_START name
	.globl	SYM (\name)
	TYPE	(\name)
	.thumb_func
SYM (\name):
.endm

/* Function start macros.  Variants for ARM and Thumb.  */

#ifdef __thumb__
#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
# if defined(__thumb2__)
#define THUMB_SYNTAX .syntax divided
# else
#define THUMB_SYNTAX
# endif
#else
#define THUMB_FUNC
#define THUMB_CODE
#define THUMB_SYNTAX
#endif

/* Declare a global function __\name in the mode the file is compiled for.  */
.macro FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	THUMB_CODE
	THUMB_FUNC
	THUMB_SYNTAX
SYM (__\name):
.endm

/* Special function that will always be coded in ARM assembly, even if
   in Thumb-only compilation.  */

#if defined(__thumb2__)

/* For Thumb-2 we build everything in thumb mode.  */
.macro ARM_FUNC_START name
	FUNC_START \name
	.syntax unified
.endm
#define EQUIV .thumb_set
.macro  ARM_CALL name
	bl	__\name
.endm

#elif defined(__INTERWORKING_STUBS__)

.macro	ARM_FUNC_START name
	FUNC_START \name
	bx	pc
	nop
	.arm
/* A hook to tell gdb that we've switched to ARM mode.  Also used to call
   directly from other local arm routines.  */
_L__\name:
.endm
#define EQUIV .thumb_set
/* Branch directly to a function declared with ARM_FUNC_START.
   Must be called in arm mode.  */
.macro  ARM_CALL name
	bl	_L__\name
.endm

#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */

.macro	ARM_FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	.arm
SYM (__\name):
.endm
#define EQUIV .set
.macro  ARM_CALL name
	bl	__\name
.endm

#endif

/* Define __\new as an alias for __\old.  */
.macro	FUNC_ALIAS new old
	.globl	SYM (__\new)
#if defined (__thumb__)
	.thumb_set	SYM (__\new), SYM (__\old)
#else
	.set	SYM (__\new), SYM (__\old)
#endif
.endm

/* Define __\new as an alias for the ARM-mode function __\old.  */
.macro	ARM_FUNC_ALIAS new old
	.globl	SYM (__\new)
	EQUIV	SYM (__\new), SYM (__\old)
#if defined(__INTERWORKING_STUBS__)
	.set	SYM (_L__\new), SYM (_L__\old)
#endif
.endm

/* Register aliases.  */

work		.req	r4	@ XXXX is this safe ?
dividend	.req	r0
divisor		.req	r1
overdone	.req	r2
result		.req	r2
curbit		.req	r3
/* ------------------------------------------------------------------------ */
/*		Bodies of the division and modulo routines.		    */
/* ------------------------------------------------------------------------ */
/* Unsigned divide core: \result = \dividend / \divisor.
   \curbit is a scratch register.  \dividend ends up holding the remainder.  */
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

	@ Use CLZ to compute how many bits the operands differ by, then
	@ jump into the middle of an unrolled shift-and-subtract sequence
	@ (3 instructions = 12 bytes per step, hence curbit*3, lsl #2).
	clz	\curbit, \dividend
	clz	\result, \divisor
	sub	\curbit, \result, \curbit
	rsbs	\curbit, \curbit, #31
	addne	\curbit, \curbit, \curbit, lsl #1	@ curbit *= 3
	mov	\result, #0
	addne	pc, pc, \curbit, lsl #2
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	adc	\result, \result, \result
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

	@ Size-optimized v5 path: align divisor with dividend using CLZ,
	@ then fall through to the generic 4-way unrolled loop below.
	clz	\curbit, \divisor
	clz	\result, \dividend
	sub	\result, \curbit, \result
	mov	\curbit, #1
	mov	\divisor, \divisor, lsl \result
	mov	\curbit, \curbit, lsl \result
	mov	\result, #0

#else /* __ARM_ARCH__ < 5 */

	@ Initially shift the divisor left 3 bits if possible,
	@ set curbit accordingly.  This allows for curbit to be located
	@ at the left end of each 4 bit nibbles in the division loop
	@ to save one loop in most cases.
	tst	\divisor, #0xe0000000
	moveq	\divisor, \divisor, lsl #3
	moveq	\curbit, #8
	movne	\curbit, #1

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	movlo	\curbit, \curbit, lsl #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	movlo	\curbit, \curbit, lsl #1
	blo	1b

	mov	\result, #0

#endif /* __ARM_ARCH__ < 5 */

	@ Division loop, unrolled 4x.
1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	orrhs	\result,   \result,   \curbit
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	orrhs	\result,   \result,   \curbit,  lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	orrhs	\result,   \result,   \curbit,  lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	orrhs	\result,   \result,   \curbit,  lsr #3
	cmp	\dividend, #0			@ Early termination?
	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
	movne	\divisor,  \divisor, lsr #4
	bne	1b

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
/* ------------------------------------------------------------------------ */
/* Compute \order = log2(\divisor) for a power-of-two \divisor.
   Clobbers \divisor on the pre-v5 path.  */
.macro ARM_DIV2_ORDER divisor, order

#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	rsb	\order, \order, #31

#else

	@ Binary search for the highest set bit, 16/8/4 bits at a time.
	cmp	\divisor, #(1 << 16)
	movhs	\divisor, \divisor, lsr #16
	movhs	\order, #16
	movlo	\order, #0

	cmp	\divisor, #(1 << 8)
	movhs	\divisor, \divisor, lsr #8
	addhs	\order, \order, #8

	cmp	\divisor, #(1 << 4)
	movhs	\divisor, \divisor, lsr #4
	addhs	\order, \order, #4

	cmp	\divisor, #(1 << 2)
	addhi	\order, \order, #3
	addls	\order, \order, \divisor, lsr #1

#endif

.endm
/* ------------------------------------------------------------------------ */
/* Unsigned modulo core: \dividend ends up holding \dividend % \divisor.
   \order and \spare are scratch registers.  */
.macro ARM_MOD_BODY dividend, divisor, order, spare

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

	@ Use CLZ to find the operand-size difference, then jump into an
	@ unrolled subtract sequence (2 instructions = 8 bytes per step).
	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	rsbs	\order, \order, #31
	addne	pc, pc, \order, lsl #3
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	clz	\spare, \dividend
	sub	\order, \order, \spare
	mov	\divisor, \divisor, lsl \order

#else /* __ARM_ARCH__ < 5 */

	mov	\order, #0

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	addlo	\order, \order, #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	addlo	\order, \order, #1
	blo	1b

#endif /* __ARM_ARCH__ < 5 */

	@ Perform all needed substractions to keep only the reminder.
	@ Do comparisons in batch of 4 first.
	subs	\order, \order, #3		@ yes, 3 is intended here
	blt	2f

1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	cmp	\dividend, #1
	mov	\divisor, \divisor, lsr #4
	subges	\order, \order, #4
	bge	1b

	tst	\order, #3
	teqne	\dividend, #0
	beq	5f

	@ Either 1, 2 or 3 comparison/substractions are left.
2:	cmn	\order, #2
	blt	4f
	beq	3f
	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
3:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	mov	\divisor,  \divisor,  lsr #1
4:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
5:
#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
/* ------------------------------------------------------------------------ */
/* Thumb-1 divide/modulo core operating on the register aliases
   dividend/divisor/result/curbit/overdone/work.  \modulo selects the
   remainder variant.
   NOTE(review): reconstructed from a badly damaged copy — verify the label
   names and the overdone fix-up sequence against upstream libgcc.  */
.macro THUMB_DIV_MOD_BODY modulo
	@ Load the constant 0x10000000 into our work register.
	mov	work, #1
	lsl	work, #28
LSYM(Loop1):
	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
	cmp	divisor, work
	bhs	LSYM(Lbignum)
	cmp	divisor, dividend
	bhs	LSYM(Lbignum)
	lsl	divisor, #4
	lsl	curbit,  #4
	b	LSYM(Loop1)
LSYM(Lbignum):
	@ Set work to 0x80000000
	lsl	work, #3
LSYM(Loop2):
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
	cmp	divisor, work
	bhs	LSYM(Loop3)
	cmp	divisor, dividend
	bhs	LSYM(Loop3)
	lsl	divisor, #1
	lsl	curbit,  #1
	b	LSYM(Loop2)
LSYM(Loop3):
	@ Test for possible subtractions ...
  .if \modulo
	@ ... On the final pass, this may subtract too much from the dividend,
	@ so keep track of which subtractions are done, we can fix them up
	@ afterwards.
	mov	overdone, #0
	cmp	dividend, divisor
	blo	LSYM(Lover1)
	sub	dividend, dividend, divisor
LSYM(Lover1):
	lsr	work, divisor, #1
	cmp	dividend, work
	blo	LSYM(Lover2)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #1
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover2):
	lsr	work, divisor, #2
	cmp	dividend, work
	blo	LSYM(Lover3)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #2
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover3):
	lsr	work, divisor, #3
	cmp	dividend, work
	blo	LSYM(Lover4)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #3
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover4):
	mov	ip, curbit
  .else
	@ ... and note which bits are done in the result.  On the final pass,
	@ this may subtract too much from the dividend, but the result will be ok,
	@ since the "bit" will have been shifted out at the bottom.
	cmp	dividend, divisor
	blo	LSYM(Lover1)
	sub	dividend, dividend, divisor
	orr	result, result, curbit
LSYM(Lover1):
	lsr	work, divisor, #1
	cmp	dividend, work
	blo	LSYM(Lover2)
	sub	dividend, dividend, work
	lsr	work, curbit, #1
	orr	result, work
LSYM(Lover2):
	lsr	work, divisor, #2
	cmp	dividend, work
	blo	LSYM(Lover3)
	sub	dividend, dividend, work
	lsr	work, curbit, #2
	orr	result, work
LSYM(Lover3):
	lsr	work, divisor, #3
	cmp	dividend, work
	blo	LSYM(Lover4)
	sub	dividend, dividend, work
	lsr	work, curbit, #3
	orr	result, work
LSYM(Lover4):
  .endif

	cmp	dividend, #0			@ Early termination?
	beq	LSYM(Lover5)
	lsr	curbit,  #4			@ No, any more bits to do?
	beq	LSYM(Lover5)
	lsr	divisor, #4
	b	LSYM(Loop3)
LSYM(Lover5):
  .if \modulo
	@ Any subtractions that we should not have done will be recorded in
	@ the top three bits of "overdone".  Exactly which were not needed
	@ are governed by the position of the bit, stored in ip.
	mov	work, #0xe
	lsl	work, #28
	and	overdone, work
	beq	LSYM(Lgot_result)

	@ If we terminated early, because dividend became zero, then the
	@ bit in ip will not be in the bottom nibble, and we should not
	@ perform the additions below.  We must test for this though
	@ (rather relying upon the TSTs to prevent the additions) since
	@ the bit in ip could be in the top two bits which might then match
	@ with one of the smaller RORs.
	mov	curbit, ip
	mov	work, #0x7
	tst	curbit, work
	beq	LSYM(Lgot_result)

	mov	curbit, ip
	mov	work, #3
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lover6)
	lsr	work, divisor, #3
	add	dividend, work
LSYM(Lover6):
	mov	curbit, ip
	mov	work, #2
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lover7)
	lsr	work, divisor, #2
	add	dividend, work
LSYM(Lover7):
	mov	curbit, ip
	mov	work, #1
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lgot_result)
	lsr	work, divisor, #1
	add	dividend, work
  .endif
LSYM(Lgot_result):
.endm
/* ------------------------------------------------------------------------ */
/*		Start of the Real Functions				    */
/* ------------------------------------------------------------------------ */
#ifdef L_udivsi3

/* unsigned __udivsi3 (unsigned dividend, unsigned divisor)
   Returns dividend / divisor; calls __div0 when divisor == 0.  */
	FUNC_START udivsi3
	FUNC_ALIAS aeabi_uidiv udivsi3

#ifdef __thumb__

	cmp	divisor, #0
	beq	LSYM(Ldiv0)
	mov	curbit, #1
	mov	result, #0

	push	{ work }
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)

	THUMB_DIV_MOD_BODY 0

	mov	r0, result
	pop	{ work }
	RET

#else /* ARM version.  */

	subs	r2, r1, #1			@ divisor == 1 is trivial
	RETc(eq)
	bcc	LSYM(Ldiv0)			@ divisor was 0
	cmp	r0, r1
	bls	11f				@ dividend <= divisor
	tst	r1, r2				@ power-of-two divisor?
	beq	12f

	ARM_DIV_BODY r0, r1, r2, r3

	mov	r0, r2
	RET

11:	moveq	r0, #1				@ dividend == divisor
	movne	r0, #0				@ dividend <  divisor
	RET

12:	ARM_DIV2_ORDER r1, r2

	mov	r0, r0, lsr r2
	RET

#endif /* ARM version */

	DIV_FUNC_END udivsi3

/* {quot, rem} = __aeabi_uidivmod (num, den): quotient in r0, remainder r1.  */
FUNC_START aeabi_uidivmod
#ifdef __thumb__
	push	{r0, r1, lr}
	bl	SYM(__udivsi3)
	pop	{r1, r2, r3}
	mul	r2, r0
	sub	r1, r1, r2
	bx	r3
#else
	stmfd	sp!, { r0, r1, lr }
	bl	SYM(__udivsi3)
	ldmfd	sp!, { r1, r2, lr }
	mul	r3, r2, r0
	sub	r1, r1, r3
	RET
#endif
	FUNC_END aeabi_uidivmod

#endif /* L_udivsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_umodsi3

/* unsigned __umodsi3 (unsigned dividend, unsigned divisor)
   Returns dividend % divisor; calls __div0 when divisor == 0.  */
	FUNC_START umodsi3

#ifdef __thumb__

	cmp	divisor, #0
	beq	LSYM(Ldiv0)
	mov	curbit, #1
	cmp	dividend, divisor
	bhs	LSYM(Lover10)
	RET

LSYM(Lover10):
	push	{ work }

	THUMB_DIV_MOD_BODY 1

	pop	{ work }
	RET

#else  /* ARM version.  */

	subs	r2, r1, #1			@ compare divisor with 1
	bcc	LSYM(Ldiv0)
	cmpne	r0, r1				@ compare dividend with divisor
	moveq	r0, #0
	tsthi	r1, r2				@ see if divisor is power of 2
	andeq	r0, r0, r2
	RETc(ls)

	ARM_MOD_BODY r0, r1, r2, r3

	RET

#endif /* ARM version.  */

	DIV_FUNC_END umodsi3

#endif /* L_umodsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_divsi3

/* int __divsi3 (int dividend, int divisor)
   Signed division via the unsigned cores, sign recovered from ip/work.
   NOTE(review): reconstructed from a damaged copy; verify against upstream.  */
	FUNC_START divsi3
	FUNC_ALIAS aeabi_idiv divsi3

#ifdef __thumb__
	cmp	divisor, #0
	beq	LSYM(Ldiv0)

	push	{ work }
	mov	work, dividend
	eor	work, divisor		@ Save the sign of the result.
	mov	ip, work
	mov	curbit, #1
	mov	result, #0
	cmp	divisor, #0
	bpl	LSYM(Lover10)
	neg	divisor, divisor	@ Loops below use unsigned.
LSYM(Lover10):
	cmp	dividend, #0
	bpl	LSYM(Lover11)
	neg	dividend, dividend
LSYM(Lover11):
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)

	THUMB_DIV_MOD_BODY 0

	mov	r0, result
	mov	work, ip
	cmp	work, #0
	bpl	LSYM(Lover12)
	neg	r0, r0
LSYM(Lover12):
	pop	{ work }
	RET

#else /* ARM version.  */

	cmp	r1, #0
	eor	ip, r0, r1			@ save the sign of the result.
	beq	LSYM(Ldiv0)
	rsbmi	r1, r1, #0			@ loops below use unsigned.
	subs	r2, r1, #1			@ division by 1 or -1 ?
	beq	10f
	movs	r3, r0
	rsbmi	r3, r0, #0			@ positive dividend value
	cmp	r3, r1
	bls	11f
	tst	r1, r2				@ divisor is power of 2 ?
	beq	12f

	ARM_DIV_BODY r3, r1, r0, r2

	cmp	ip, #0
	rsbmi	r0, r0, #0
	RET

10:	teq	ip, r0				@ same sign ?
	rsbmi	r0, r0, #0
	RET

11:	movlo	r0, #0
	moveq	r0, ip, asr #31
	orreq	r0, r0, #1
	RET

12:	ARM_DIV2_ORDER r1, r2

	cmp	ip, #0
	mov	r0, r3, lsr r2
	rsbmi	r0, r0, #0
	RET

#endif /* ARM version */

	DIV_FUNC_END divsi3

/* {quot, rem} = __aeabi_idivmod (num, den): quotient in r0, remainder r1.  */
FUNC_START aeabi_idivmod
#ifdef __thumb__
	push	{r0, r1, lr}
	bl	SYM(__divsi3)
	pop	{r1, r2, r3}
	mul	r2, r0
	sub	r1, r1, r2
	bx	r3
#else
	stmfd	sp!, { r0, r1, lr }
	bl	SYM(__divsi3)
	ldmfd	sp!, { r1, r2, lr }
	mul	r3, r2, r0
	sub	r1, r1, r3
	RET
#endif
	FUNC_END aeabi_idivmod

#endif /* L_divsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_modsi3

/* int __modsi3 (int dividend, int divisor)
   Signed remainder; the sign of the result follows the dividend.
   NOTE(review): reconstructed from a damaged copy; verify against upstream.  */
	FUNC_START modsi3

#ifdef __thumb__

	mov	curbit, #1
	cmp	divisor, #0
	beq	LSYM(Ldiv0)
	bpl	LSYM(Lover10)
	neg	divisor, divisor	@ Loops below use unsigned.
LSYM(Lover10):
	push	{ work }
	@ Need to save the sign of the dividend, unfortunately, we need
	@ work later on.  Must do this after saving the original value of
	@ the work register, because we will pop this value off first.
	push	{ dividend }
	cmp	dividend, #0
	bpl	LSYM(Lover11)
	neg	dividend, dividend
LSYM(Lover11):
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)

	THUMB_DIV_MOD_BODY 1

	pop	{ work }
	cmp	work, #0
	bpl	LSYM(Lover12)
	neg	dividend, dividend
LSYM(Lover12):
	pop	{ work }
	RET

#else /* ARM version.  */

	cmp	r1, #0
	beq	LSYM(Ldiv0)
	rsbmi	r1, r1, #0			@ loops below use unsigned.
	movs	ip, r0				@ preserve sign of dividend
	rsbmi	r0, r0, #0			@ if negative make positive
	subs	r2, r1, #1			@ compare divisor with 1
	cmpne	r0, r1				@ compare dividend with divisor
	moveq	r0, #0
	tsthi	r1, r2				@ see if divisor is power of 2
	andeq	r0, r0, r2
	bls	10f

	ARM_MOD_BODY r0, r1, r2, r3

10:	cmp	ip, #0
	rsbmi	r0, r0, #0
	RET

#endif /* ARM version */

	DIV_FUNC_END modsi3

#endif /* L_modsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_dvmd_tls

/* Trivial divide-by-zero handler: just return.  */
	FUNC_START div0
	FUNC_ALIAS aeabi_idiv0 div0
	FUNC_ALIAS aeabi_ldiv0 div0

	RET

	FUNC_END aeabi_ldiv0
	FUNC_END aeabi_idiv0
	FUNC_END div0

#endif /* L_divmodsi_tools */
/* ------------------------------------------------------------------------ */
#ifdef L_dvmd_lnx
@ GNU/Linux division-by zero handler.  Used in place of L_dvmd_tls

/* Constant taken from <asm/signal.h>.  */
#define SIGFPE	8

	ARM_FUNC_START div0

	do_push	{r1, lr}
	mov	r0, #SIGFPE
	bl	SYM(raise) __PLT__
	RETLDM	r1

	FUNC_END div0

#endif /* L_dvmd_lnx */
/* ------------------------------------------------------------------------ */
/* Dword shift operations.  */
/* All the following Dword shift variants rely on the fact that
	shft xxx, Reg
   is in fact done as
	shft xxx, (Reg & 255)
   so for Reg value in (32...63) and (-1...-31) we will get zero (in the
   case of logical shifts) or the sign (for asr).  */

/* NOTE(review): the al/ah endian defines and the __symbian__ guard below were
   reconstructed from a damaged copy; verify against upstream libgcc.  */
#ifdef __ARMEB__
#define al	r1
#define ah	r0
#else
#define al	r0
#define ah	r1
#endif

/* Prevent __aeabi double-word shifts from being produced on SymbianOS.  */
#ifndef __symbian__

#ifdef L_lshrdi3

/* DItype __lshrdi3 (DItype value, word_type count): logical shift right.  */
	FUNC_START lshrdi3
	FUNC_ALIAS aeabi_llsr lshrdi3

#ifdef __thumb__
	lsr	al, r2
	mov	r3, ah
	lsr	ah, r2
	mov	ip, r3
	sub	r2, #32
	lsr	r3, r2
	orr	al, r3
	neg	r2, r2
	mov	r3, ip
	lsl	r3, r2
	orr	al, r3
	RET
#else
	subs	r3, r2, #32
	rsb	ip, r2, #32
	movmi	al, al, lsr r2
	movpl	al, ah, lsr r3
	orrmi	al, al, ah, lsl ip
	mov	ah, ah, lsr r2
	RET
#endif
	FUNC_END aeabi_llsr
	FUNC_END lshrdi3

#endif

#ifdef L_ashrdi3

/* DItype __ashrdi3 (DItype value, word_type count): arithmetic shift right.  */
	FUNC_START ashrdi3
	FUNC_ALIAS aeabi_lasr ashrdi3

#ifdef __thumb__
	lsr	al, r2
	mov	r3, ah
	asr	ah, r2
	sub	r2, #32
	@ If r2 is negative at this point the following step would OR
	@ the sign bit into all of AL.  That's not what we want...
	bmi	1f
	mov	ip, r3
	asr	r3, r2
	orr	al, r3
	mov	r3, ip
1:
	neg	r2, r2
	lsl	r3, r2
	orr	al, r3
	RET
#else
	subs	r3, r2, #32
	rsb	ip, r2, #32
	movmi	al, al, lsr r2
	movpl	al, ah, asr r3
	orrmi	al, al, ah, lsl ip
	mov	ah, ah, asr r2
	RET
#endif

	FUNC_END aeabi_lasr
	FUNC_END ashrdi3

#endif

#ifdef L_ashldi3

/* DItype __ashldi3 (DItype value, word_type count): shift left.  */
	FUNC_START ashldi3
	FUNC_ALIAS aeabi_llsl ashldi3

#ifdef __thumb__
	lsl	ah, r2
	mov	r3, al
	lsl	al, r2
	mov	ip, r3
	sub	r2, #32
	lsl	r3, r2
	orr	ah, r3
	neg	r2, r2
	mov	r3, ip
	lsr	r3, r2
	orr	ah, r3
	RET
#else
	subs	r3, r2, #32
	rsb	ip, r2, #32
	movmi	ah, ah, lsl r2
	movpl	ah, al, lsl r3
	orrmi	ah, ah, al, lsr ip
	mov	al, al, lsl r2
	RET
#endif
	FUNC_END aeabi_llsl
	FUNC_END ashldi3

#endif

#endif /* __symbian__ */
/* ------------------------------------------------------------------------ */
/* These next two sections are here despite the fact that they contain Thumb
   assembler because their presence allows interworked code to be linked even
   when the GCC library is this one.  */

/* Do not build the interworking functions when the target architecture does
   not support Thumb instructions.  (This can be a multilib option).  */
#if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__\
	|| defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
	|| __ARM_ARCH__ >= 6

#if defined L_call_via_rX

/* These labels & instructions are used by the Arm/Thumb interworking code.
   The address of function to be called is loaded into a register and then
   one of these labels is called via a BL instruction.  This puts the
   return address into the link register with the bottom bit set, and the
   code here switches to the correct mode before executing the function.  */

	.text
	.align 0
	.force_thumb

/* NOTE(review): register instantiation list reconstructed from a damaged
   copy; verify against upstream libgcc.  */
.macro call_via register
	THUMB_FUNC_START _call_via_\register

	bx	\register
	nop

	SIZE	(_call_via_\register)
.endm

	call_via r0
	call_via r1
	call_via r2
	call_via r3
	call_via r4
	call_via r5
	call_via r6
	call_via r7
	call_via r8
	call_via r9
	call_via sl
	call_via fp
	call_via ip
	call_via sp
	call_via lr

#endif /* L_call_via_rX */
/* Don't bother with the old interworking routines for Thumb-2.  */
/* ??? Maybe only omit these on v7m.  */
#ifndef __thumb2__

#if defined L_interwork_call_via_rX

/* These labels & instructions are used by the Arm/Thumb interworking code,
   when the target address is in an unknown instruction set.  The address
   of function to be called is loaded into a register and then one of these
   labels is called via a BL instruction.  This puts the return address
   into the link register with the bottom bit set, and the code here
   switches to the correct mode before executing the function.  Unfortunately
   the target code cannot be relied upon to return via a BX instruction, so
   instead we have to store the resturn address on the stack and allow the
   called function to return here instead.  Upon return we recover the real
   return address and use a BX to get back to Thumb mode.

   There are three variations of this code.  The first,
   _interwork_call_via_rN(), will push the return address onto the
   stack and pop it in _arm_return().  It should only be used if all
   arguments are passed in registers.

   The second, _interwork_r7_call_via_rN(), instead stores the return
   address at [r7, #-4].  It is the caller's responsibility to ensure
   that this address is valid and contains no useful data.

   The third, _interwork_r11_call_via_rN(), works in the same way but
   uses r11 instead of r7.  It is useful if the caller does not really
   need a frame pointer.  */

/* NOTE(review): reconstructed from a damaged copy; verify the _arm_return
   stubs and the lr variant against upstream libgcc.  */

	.text
	.align 0

	.code	32
	.globl _arm_return
LSYM(Lstart_arm_return):
	cfi_start	LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
	cfi_push	0, 0xe, -0x8, 0x8
	nop	@ This nop is for the benefit of debuggers, so that
		@ backtraces will use the correct unwind information.
_arm_return:
	RETLDM	unwind=LSYM(Lstart_arm_return)
	cfi_end	LSYM(Lend_arm_return)

	.globl _arm_return_r7
_arm_return_r7:
	ldr	lr, [r7, #-4]
	bx	lr

	.globl _arm_return_r11
_arm_return_r11:
	ldr	lr, [r11, #-4]
	bx	lr

/* Thumb->ARM trampoline saving the return address at [\frame, #-4].  */
.macro interwork_with_frame frame, register, name, return
	.code	16

	THUMB_FUNC_START \name

	bx	pc
	nop

	.code	32
	tst	\register, #1
	streq	lr, [\frame, #-4]
	adreq	lr, _arm_return_\frame
	bx	\register

	SIZE	(\name)
.endm

/* Thumb->ARM trampoline saving the return address on the stack.  */
.macro interwork register
	.code	16

	THUMB_FUNC_START _interwork_call_via_\register

	bx	pc
	nop

	.code	32
	.globl LSYM(Lchange_\register)
LSYM(Lchange_\register):
	tst	\register, #1
	streq	lr, [sp, #-8]!
	adreq	lr, _arm_return
	bx	\register

	SIZE	(_interwork_call_via_\register)

	interwork_with_frame r7,\register,_interwork_r7_call_via_\register
	interwork_with_frame r11,\register,_interwork_r11_call_via_\register
.endm

	interwork r0
	interwork r1
	interwork r2
	interwork r3
	interwork r4
	interwork r5
	interwork r6
	interwork r7
	interwork r8
	interwork r9
	interwork sl
	interwork fp
	interwork ip

	/* The LR case has to be handled a little differently...  */
	.code 16

	THUMB_FUNC_START _interwork_call_via_lr

	bx	pc
	nop

	.code 32
	.globl .Lchange_lr
.Lchange_lr:
	tst	lr, #1
	stmeqdb	r13!, {lr, pc}
	mov	ip, lr
	adreq	lr, _arm_return
	bx	ip

	SIZE	(_interwork_call_via_lr)

#endif /* L_interwork_call_via_rX */
#endif /* !__thumb2__ */
#endif /* Arch supports thumb.  */
/* Floating-point support routines; not wanted on SymbianOS.  */
#ifndef __symbian__
#include "ieee754-df.S"
#include "ieee754-sf.S"
#endif /* __symbian__ */