@ libgcc routines for ARM cpu.
@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)

/* Copyright 1995, 1996, 1998, 1999, 2000 Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/* ------------------------------------------------------------------------ */

/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error  __USER_LABEL_PREFIX__ not defined
#endif

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
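
/* For example, on a target whose __USER_LABEL_PREFIX__ is `_' (an
   illustrative assumption; many ELF targets use an empty prefix),
   SYM (udivsi3) expands via CONCAT1/CONCAT2 to the label _udivsi3.  */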
#ifdef __ELF__
#ifdef __thumb__
#define __PLT__  /* Not supported in Thumb assembler (for now).  */
#else
#define __PLT__ (PLT)
#endif
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#define LSYM(x) .x
#else
#define __PLT__
#define TYPE(x)
#define SIZE(x)
#define LSYM(x) x
#endif
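
/* For instance, on ELF LSYM(Ldiv0) expands to .Ldiv0: the .L prefix
   marks it as an assembler-local label, so it never appears in the
   symbol table.  On non-ELF targets the name is used as-is.  */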
/* Function end macros.  Variants for 26 bit APCS and interworking.  */

@ This selects the minimum architecture level required.
#define __ARM_ARCH__ 3

#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
	|| defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# undef __ARM_ARCH__
# define __ARM_ARCH__ 4
#endif

#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
	|| defined(__ARM_ARCH_5TE__)
# undef __ARM_ARCH__
# define __ARM_ARCH__ 5
#endif
/* How to return from a function call depends on the architecture variant.  */

#ifdef __APCS_26__

# define RET		movs	pc, lr
# define RETc(x)	mov##x##s	pc, lr

#elif (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

# define RET		bx	lr
# define RETc(x)	bx##x	lr

# if (__ARM_ARCH__ == 4) \
	&& (defined(__thumb__) || defined(__THUMB_INTERWORK__))
#  define __INTERWORKING__
# endif

#else

# define RET		mov	pc, lr
# define RETc(x)	mov##x	pc, lr

#endif
/* Don't pass dirn, it's there just to get token pasting right.  */

.macro	RETLDM	regs=, cond=, dirn=ia
#ifdef __APCS_26__
	.ifc "\regs",""
	ldm\cond\dirn	sp!, {pc}^
	.else
	ldm\cond\dirn	sp!, {\regs, pc}^
	.endif
#elif defined (__INTERWORKING__)
	.ifc "\regs",""
	ldr\cond	lr, [sp], #4
	.else
	ldm\cond\dirn	sp!, {\regs, lr}
	.endif
	bx\cond	lr
#else
	.ifc "\regs",""
	ldr\cond	pc, [sp], #4
	.else
	ldm\cond\dirn	sp!, {\regs, pc}
	.endif
#endif
.endm
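
/* Usage sketch (illustrative): a function that saved { r4, lr } on
   entry can return with

	RETLDM	r4

   which expands to "ldmia sp!, {r4, lr}; bx lr" on an interworking
   build, to "ldmia sp!, {r4, pc}" on a plain 32-bit build, and to the
   flag-restoring "ldmia sp!, {r4, pc}^" under 26-bit APCS.  */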
.macro ARM_LDIV0
LSYM(Ldiv0):
	str	lr, [sp, #-4]!
	bl	SYM (__div0) __PLT__
	mov	r0, #0			@ About as wrong as it could be.
	RETLDM
.endm


.macro THUMB_LDIV0
LSYM(Ldiv0):
	push	{ lr }
	bl	SYM (__div0)
	mov	r0, #0			@ About as wrong as it could be.
#if defined (__INTERWORKING__)
	pop	{ r1 }
	bx	r1
#else
	pop	{ pc }
#endif
.endm
.macro FUNC_END name
	SIZE (__\name)
.endm

.macro DIV_FUNC_END name
LSYM(Ldiv0):
#ifdef __thumb__
	THUMB_LDIV0
#else
	ARM_LDIV0
#endif
	FUNC_END \name
.endm

.macro THUMB_FUNC_START name
	.globl	SYM (\name)
	TYPE	(\name)
	.thumb_func
SYM (\name):
.endm
/* Function start macros.  Variants for ARM and Thumb.  */

#ifdef __thumb__
#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
#else
#define THUMB_FUNC
#define THUMB_CODE
#endif

.macro FUNC_START name
	.text
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	THUMB_CODE
	THUMB_FUNC
SYM (__\name):
.endm
/* Special function that will always be coded in ARM assembly, even if
   in Thumb-only compilation.  */

#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
.macro	ARM_FUNC_START name
	FUNC_START \name
	bx	pc
	nop			@ Pad so the ARM code below is word-aligned.
	.arm
_L__\name:		/* A hook to tell gdb that we've switched to ARM */
.endm
#else
.macro	ARM_FUNC_START name
	FUNC_START \name
.endm
#endif
#ifdef __thumb__
/* Register aliases.  */

work		.req	r4	@ XXXX is this safe ?
dividend	.req	r0
divisor		.req	r1
overdone	.req	r2
result		.req	r2
curbit		.req	r3
#endif
#if 0
ip		.req	r12
sp		.req	r13
lr		.req	r14
pc		.req	r15
#endif
/* ------------------------------------------------------------------------ */
/*		Bodies of the division and modulo routines.		    */
/* ------------------------------------------------------------------------ */
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5

	clz	\curbit, \divisor
	clz	\result, \dividend
	sub	\result, \curbit, \result
	mov	\curbit, #1
	mov	\divisor, \divisor, lsl \result
	mov	\curbit, \curbit, lsl \result
	mov	\result, #0

#else

	@ Initially shift the divisor left 3 bits if possible,
	@ set curbit accordingly.  This allows for curbit to be located
	@ at the left end of each 4-bit nibble in the division loop
	@ to save one loop in most cases.
	tst	\divisor, #0xe0000000
	moveq	\divisor, \divisor, lsl #3
	moveq	\curbit, #8
	movne	\curbit, #1

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	movlo	\curbit, \curbit, lsl #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	movlo	\curbit, \curbit, lsl #1
	blo	1b

	mov	\result, #0

#endif

	@ Division loop
1:	cmp	\dividend, \divisor
	subhs	\dividend, \dividend, \divisor
	orrhs	\result,   \result,   \curbit
	cmp	\dividend, \divisor,  lsr #1
	subhs	\dividend, \dividend, \divisor, lsr #1
	orrhs	\result,   \result,   \curbit,  lsr #1
	cmp	\dividend, \divisor,  lsr #2
	subhs	\dividend, \dividend, \divisor, lsr #2
	orrhs	\result,   \result,   \curbit,  lsr #2
	cmp	\dividend, \divisor,  lsr #3
	subhs	\dividend, \dividend, \divisor, lsr #3
	orrhs	\result,   \result,   \curbit,  lsr #3
	cmp	\dividend, #0			@ Early termination?
	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
	movne	\divisor,  \divisor, lsr #4
	bne	1b

.endm
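
/* A rough C equivalent of the shift-and-subtract loop above (an
   illustrative sketch only, not part of the build; names follow the
   macro arguments):

     unsigned div_body (unsigned dividend, unsigned divisor)
     {
       unsigned result = 0, curbit = 1;
       while (divisor < dividend && !(divisor & 0x80000000))
         divisor <<= 1, curbit <<= 1;	// align divisor with dividend
       while (curbit)			// one iteration per quotient bit
         {
           if (dividend >= divisor)
             dividend -= divisor, result |= curbit;
           divisor >>= 1, curbit >>= 1;
         }
       return result;			// the remainder is left in dividend
     }

   The assembly version unrolls this loop four bits at a time, stops
   early once the dividend reaches zero, and on ARMv5 uses CLZ to
   compute the initial alignment directly.  */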
/* ------------------------------------------------------------------------ */
.macro ARM_DIV2_ORDER divisor, order

#if __ARM_ARCH__ >= 5

	clz	\order, \divisor
	rsb	\order, \order, #31

#else

	cmp	\divisor, #(1 << 16)
	movhs	\divisor, \divisor, lsr #16
	movhs	\order, #16
	movlo	\order, #0

	cmp	\divisor, #(1 << 8)
	movhs	\divisor, \divisor, lsr #8
	addhs	\order, \order, #8

	cmp	\divisor, #(1 << 4)
	movhs	\divisor, \divisor, lsr #4
	addhs	\order, \order, #4

	cmp	\divisor, #(1 << 2)
	addhi	\order, \order, #3
	addls	\order, \order, \divisor, lsr #1

#endif

.endm
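
/* ARM_DIV2_ORDER computes order = log2 (divisor) for a power-of-two
   divisor; the callers below use it as a shift count on their fast
   path.  Worked example (illustrative): for divisor = 0x40 the
   binary-search variant skips the 1 << 16 and 1 << 8 steps, the
   1 << 4 step shifts divisor down to 4 and adds 4, and the final step
   adds divisor >> 1 = 2, leaving order = 6, the same value the ARMv5
   path gets from 31 - clz (0x40).  */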
325 /* ------------------------------------------------------------------------ */
326 .macro ARM_MOD_BODY dividend, divisor, order, spare
328 #if __ARM_ARCH__ >= 5
330 clz \order, \divisor
331 clz \spare, \dividend
332 sub \order, \order, \spare
333 mov \divisor, \divisor, lsl \order
335 #else
337 mov \order, #0
339 @ Unless the divisor is very big, shift it up in multiples of
340 @ four bits, since this is the amount of unwinding in the main
341 @ division loop. Continue shifting until the divisor is
342 @ larger than the dividend.
343 1: cmp \divisor, #0x10000000
344 cmplo \divisor, \dividend
345 movlo \divisor, \divisor, lsl #4
346 addlo \order, \order, #4
347 blo 1b
349 @ For very big divisors, we must shift it a bit at a time, or
350 @ we will be in danger of overflowing.
351 1: cmp \divisor, #0x80000000
352 cmplo \divisor, \dividend
353 movlo \divisor, \divisor, lsl #1
354 addlo \order, \order, #1
355 blo 1b
357 #endif
359 @ Perform all needed substractions to keep only the reminder.
360 @ Do comparisons in batch of 4 first.
361 subs \order, \order, #3 @ yes, 3 is intended here
362 blt 2f
364 1: cmp \dividend, \divisor
365 subhs \dividend, \dividend, \divisor
366 cmp \dividend, \divisor, lsr #1
367 subhs \dividend, \dividend, \divisor, lsr #1
368 cmp \dividend, \divisor, lsr #2
369 subhs \dividend, \dividend, \divisor, lsr #2
370 cmp \dividend, \divisor, lsr #3
371 subhs \dividend, \dividend, \divisor, lsr #3
372 cmp \dividend, #1
373 mov \divisor, \divisor, lsr #4
374 subges \order, \order, #4
375 bge 1b
377 tst \order, #3
378 teqne \dividend, #0
379 beq 5f
381 @ Either 1, 2 or 3 comparison/substractions are left.
382 2: cmn \order, #2
383 blt 4f
384 beq 3f
385 cmp \dividend, \divisor
386 subhs \dividend, \dividend, \divisor
387 mov \divisor, \divisor, lsr #1
388 3: cmp \dividend, \divisor
389 subhs \dividend, \dividend, \divisor
390 mov \divisor, \divisor, lsr #1
391 4: cmp \dividend, \divisor
392 subhs \dividend, \dividend, \divisor
394 .endm
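
/* Worked example (illustrative): on ARMv5, for dividend = 100 and
   divisor = 9, CLZ alignment gives order = 3 and divisor = 72.  One
   pass of the 4-way batch then subtracts 72, 18 and 9 (skipping 36),
   leaving dividend = 1 = 100 % 9, and the early-termination test
   branches to 5 with the remainder in \dividend.  */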
/* ------------------------------------------------------------------------ */
.macro THUMB_DIV_MOD_BODY modulo
	@ Load the constant 0x10000000 into our work register.
	mov	work, #1
	lsl	work, #28
LSYM(Loop1):
	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
	cmp	divisor, work
	bhs	LSYM(Lbignum)
	cmp	divisor, dividend
	bhs	LSYM(Lbignum)
	lsl	divisor, #4
	lsl	curbit,  #4
	b	LSYM(Loop1)
LSYM(Lbignum):
	@ Set work to 0x80000000
	lsl	work, #3
LSYM(Loop2):
	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
	cmp	divisor, work
	bhs	LSYM(Loop3)
	cmp	divisor, dividend
	bhs	LSYM(Loop3)
	lsl	divisor, #1
	lsl	curbit,  #1
	b	LSYM(Loop2)
LSYM(Loop3):
	@ Test for possible subtractions ...
  .if \modulo
	@ ... On the final pass, this may subtract too much from the dividend,
	@ so keep track of which subtractions are done, so that we can fix
	@ them up afterwards.
	mov	overdone, #0
	cmp	dividend, divisor
	blo	LSYM(Lover1)
	sub	dividend, dividend, divisor
LSYM(Lover1):
	lsr	work, divisor, #1
	cmp	dividend, work
	blo	LSYM(Lover2)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #1
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover2):
	lsr	work, divisor, #2
	cmp	dividend, work
	blo	LSYM(Lover3)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #2
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover3):
	lsr	work, divisor, #3
	cmp	dividend, work
	blo	LSYM(Lover4)
	sub	dividend, dividend, work
	mov	ip, curbit
	mov	work, #3
	ror	curbit, work
	orr	overdone, curbit
	mov	curbit, ip
LSYM(Lover4):
	mov	ip, curbit
  .else
	@ ... and note which bits are done in the result.  On the final pass,
	@ this may subtract too much from the dividend, but the result will
	@ be ok, since the "bit" will have been shifted out at the bottom.
	cmp	dividend, divisor
	blo	LSYM(Lover1)
	sub	dividend, dividend, divisor
	orr	result, result, curbit
LSYM(Lover1):
	lsr	work, divisor, #1
	cmp	dividend, work
	blo	LSYM(Lover2)
	sub	dividend, dividend, work
	lsr	work, curbit, #1
	orr	result, work
LSYM(Lover2):
	lsr	work, divisor, #2
	cmp	dividend, work
	blo	LSYM(Lover3)
	sub	dividend, dividend, work
	lsr	work, curbit, #2
	orr	result, work
LSYM(Lover3):
	lsr	work, divisor, #3
	cmp	dividend, work
	blo	LSYM(Lover4)
	sub	dividend, dividend, work
	lsr	work, curbit, #3
	orr	result, work
LSYM(Lover4):
  .endif

	cmp	dividend, #0			@ Early termination?
	beq	LSYM(Lover5)
	lsr	curbit,  #4			@ No, any more bits to do?
	beq	LSYM(Lover5)
	lsr	divisor, #4
	b	LSYM(Loop3)
LSYM(Lover5):
  .if \modulo
	@ Any subtractions that we should not have done will be recorded in
	@ the top three bits of "overdone".  Exactly which were not needed
	@ are governed by the position of the bit, stored in ip.
	mov	work, #0xe
	lsl	work, #28
	and	overdone, work
	beq	LSYM(Lgot_result)

	@ If we terminated early, because dividend became zero, then the
	@ bit in ip will not be in the bottom nibble, and we should not
	@ perform the additions below.  We must test for this though
	@ (rather than relying upon the TSTs to prevent the additions) since
	@ the bit in ip could be in the top two bits which might then match
	@ with one of the smaller RORs.
	mov	curbit, ip
	mov	work, #0x7
	tst	curbit, work
	beq	LSYM(Lgot_result)

	mov	curbit, ip
	mov	work, #3
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lover6)
	lsr	work, divisor, #3
	add	dividend, work
LSYM(Lover6):
	mov	curbit, ip
	mov	work, #2
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lover7)
	lsr	work, divisor, #2
	add	dividend, work
LSYM(Lover7):
	mov	curbit, ip
	mov	work, #1
	ror	curbit, work
	tst	overdone, curbit
	beq	LSYM(Lgot_result)
	lsr	work, divisor, #1
	add	dividend, work
  .endif
LSYM(Lgot_result):
.endm
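
/* Note on the modulo variant's bookkeeping (an illustrative summary):
   curbit holds a single set bit marking the current loop position, and
   "ror curbit, #n" moves that bit down n places, wrapping into the top
   bits once curbit is in the bottom nibble on the final pass.  The
   0xe0000000 mask above therefore keeps exactly the wrapped bits,
   i.e. the subtractions of divisor >> n that went one step too far,
   and the fix-up code adds the matching divisor >> n back onto the
   dividend.  */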
/* ------------------------------------------------------------------------ */
/*		Start of the Real Functions				    */
/* ------------------------------------------------------------------------ */
#ifdef L_udivsi3

	FUNC_START udivsi3

#ifdef __thumb__

	cmp	divisor, #0
	beq	LSYM(Ldiv0)
	mov	curbit, #1
	mov	result, #0

	push	{ work }
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)

	THUMB_DIV_MOD_BODY 0

	mov	r0, result
	pop	{ work }
	RET

#else /* ARM version.  */

	subs	r2, r1, #1		@ compare divisor with 1
	RETc(eq)			@ division by 1: result is the dividend
	bcc	LSYM(Ldiv0)		@ divisor was 0
	cmp	r0, r1
	bls	11f			@ dividend <= divisor
	tst	r1, r2			@ divisor is power of 2 ?
	beq	12f

	ARM_DIV_BODY r0, r1, r2, r3

	mov	r0, r2
	RET

11:	moveq	r0, #1			@ dividend == divisor
	movne	r0, #0			@ dividend <  divisor
	RET

12:	ARM_DIV2_ORDER r1, r2

	mov	r0, r0, lsr r2
	RET

#endif /* ARM version */

	DIV_FUNC_END udivsi3

#endif /* L_udivsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_umodsi3

	FUNC_START umodsi3

#ifdef __thumb__

	cmp	divisor, #0
	beq	LSYM(Ldiv0)
	mov	curbit, #1
	cmp	dividend, divisor
	bhs	LSYM(Lover10)
	RET

LSYM(Lover10):
	push	{ work }

	THUMB_DIV_MOD_BODY 1

	pop	{ work }
	RET

#else /* ARM version.  */

	subs	r2, r1, #1		@ compare divisor with 1
	bcc	LSYM(Ldiv0)
	cmpne	r0, r1			@ compare dividend with divisor
	moveq	r0, #0
	tsthi	r1, r2			@ see if divisor is power of 2
	andeq	r0, r0, r2
	RETc(ls)

	ARM_MOD_BODY r0, r1, r2, r3

	RET

#endif /* ARM version.  */

	DIV_FUNC_END umodsi3

#endif /* L_umodsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_divsi3

	FUNC_START divsi3

#ifdef __thumb__
	cmp	divisor, #0
	beq	LSYM(Ldiv0)

	push	{ work }
	mov	work, dividend
	eor	work, divisor		@ Save the sign of the result.
	mov	ip, work
	mov	curbit, #1
	mov	result, #0
	cmp	divisor, #0
	bpl	LSYM(Lover10)
	neg	divisor, divisor	@ Loops below use unsigned.
LSYM(Lover10):
	cmp	dividend, #0
	bpl	LSYM(Lover11)
	neg	dividend, dividend
LSYM(Lover11):
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)

	THUMB_DIV_MOD_BODY 0

	mov	r0, result
	mov	work, ip
	cmp	work, #0
	bpl	LSYM(Lover12)
	neg	r0, r0
LSYM(Lover12):
	pop	{ work }
	RET

#else /* ARM version.  */

	cmp	r1, #0
	eor	ip, r0, r1		@ save the sign of the result.
	beq	LSYM(Ldiv0)
	rsbmi	r1, r1, #0		@ loops below use unsigned.
	subs	r2, r1, #1		@ division by 1 or -1 ?
	beq	10f
	movs	r3, r0
	rsbmi	r3, r0, #0		@ positive dividend value
	cmp	r3, r1
	bls	11f
	tst	r1, r2			@ divisor is power of 2 ?
	beq	12f

	ARM_DIV_BODY r3, r1, r0, r2

	cmp	ip, #0
	rsbmi	r0, r0, #0
	RET

10:	teq	ip, r0			@ same sign ?
	rsbmi	r0, r0, #0
	RET

11:	movlo	r0, #0
	moveq	r0, ip, asr #31
	orreq	r0, r0, #1
	RET

12:	ARM_DIV2_ORDER r1, r2

	cmp	ip, #0
	mov	r0, r3, lsr r2
	rsbmi	r0, r0, #0
	RET

#endif /* ARM version */

	DIV_FUNC_END divsi3

#endif /* L_divsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_modsi3

	FUNC_START modsi3

#ifdef __thumb__

	mov	curbit, #1
	cmp	divisor, #0
	beq	LSYM(Ldiv0)
	bpl	LSYM(Lover10)
	neg	divisor, divisor	@ Loops below use unsigned.
LSYM(Lover10):
	push	{ work }
	@ Need to save the sign of the dividend; unfortunately, we need
	@ the work register later on.  Must do this after saving the original
	@ value of the work register, because we will pop this value off first.
	push	{ dividend }
	cmp	dividend, #0
	bpl	LSYM(Lover11)
	neg	dividend, dividend
LSYM(Lover11):
	cmp	dividend, divisor
	blo	LSYM(Lgot_result)

	THUMB_DIV_MOD_BODY 1

	pop	{ work }
	cmp	work, #0
	bpl	LSYM(Lover12)
	neg	dividend, dividend
LSYM(Lover12):
	pop	{ work }
	RET

#else /* ARM version.  */

	cmp	r1, #0
	beq	LSYM(Ldiv0)
	rsbmi	r1, r1, #0		@ loops below use unsigned.
	movs	ip, r0			@ preserve sign of dividend
	rsbmi	r0, r0, #0		@ if negative make positive
	subs	r2, r1, #1		@ compare divisor with 1
	cmpne	r0, r1			@ compare dividend with divisor
	moveq	r0, #0
	tsthi	r1, r2			@ see if divisor is power of 2
	andeq	r0, r0, r2
	bls	10f

	ARM_MOD_BODY r0, r1, r2, r3

10:	cmp	ip, #0
	rsbmi	r0, r0, #0
	RET

#endif /* ARM version */

	DIV_FUNC_END modsi3

#endif /* L_modsi3 */
/* ------------------------------------------------------------------------ */
#ifdef L_dvmd_tls

	FUNC_START div0

	RET

	FUNC_END div0

#endif /* L_dvmd_tls */
/* ------------------------------------------------------------------------ */
#ifdef L_dvmd_lnx

@ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls

/* Constants taken from <asm/unistd.h> and <asm/signal.h> */
#define SIGFPE	8
#define __NR_SYSCALL_BASE	0x900000
#define __NR_getpid		(__NR_SYSCALL_BASE+ 20)
#define __NR_kill		(__NR_SYSCALL_BASE+ 37)
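
/* The handler below is roughly equivalent to the C fragment

     pid_t pid = getpid ();
     if ((unsigned) pid < (unsigned) -1000)	// not a kernel error return
       kill (pid, SIGFPE);

   issued via raw swi instructions; the cmn/hs pair implements the
   kernel convention that values in the -1000..-1 range are errno
   codes (an illustrative sketch, not part of the build).  */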
	.code	32
	FUNC_START div0

	stmfd	sp!, {r1, lr}
	swi	__NR_getpid
	cmn	r0, #1000
	RETLDM	r1 hs
	mov	r1, #SIGFPE
	swi	__NR_kill
	RETLDM	r1

	FUNC_END div0

#endif /* L_dvmd_lnx */
/* ------------------------------------------------------------------------ */
/* These next two sections are here despite the fact that they contain Thumb
   assembler because their presence allows interworked code to be linked even
   when the GCC library is this one.  */

/* Do not build the interworking functions when the target architecture does
   not support Thumb instructions.  (This can be a multilib option).  */
#if defined L_call_via_rX && (defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__)

/* These labels & instructions are used by the Arm/Thumb interworking code.
   The address of function to be called is loaded into a register and then
   one of these labels is called via a BL instruction.  This puts the
   return address into the link register with the bottom bit set, and the
   code here switches to the correct mode before executing the function.  */

	.text
	.align 0
	.force_thumb

.macro call_via register
	THUMB_FUNC_START _call_via_\register

	bx	\register
	nop

	SIZE	(_call_via_\register)
.endm
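
/* Illustrative calling sequence (an assumption about caller-side code,
   not part of this file): the compiler emits something like

	ldr	r3, =some_function
	bl	_call_via_r3

   so that BL records the return address while the BX above enters
   some_function in whichever state the bottom bit of its address
   selects.  */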
	call_via r0
	call_via r1
	call_via r2
	call_via r3
	call_via r4
	call_via r5
	call_via r6
	call_via r7
	call_via r8
	call_via r9
	call_via sl
	call_via fp
	call_via ip
	call_via sp
	call_via lr

#endif /* L_call_via_rX */
/* ------------------------------------------------------------------------ */
/* Do not build the interworking functions when the target architecture does
   not support Thumb instructions.  (This can be a multilib option).  */
#if defined L_interwork_call_via_rX && (defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__ || defined __ARM_ARCH_5TE__)

/* These labels & instructions are used by the Arm/Thumb interworking code,
   when the target address is in an unknown instruction set.  The address
   of function to be called is loaded into a register and then one of these
   labels is called via a BL instruction.  This puts the return address
   into the link register with the bottom bit set, and the code here
   switches to the correct mode before executing the function.  Unfortunately
   the target code cannot be relied upon to return via a BX instruction, so
   instead we have to store the return address on the stack and allow the
   called function to return here instead.  Upon return we recover the real
   return address and use a BX to get back to Thumb mode.  */

	.text
	.align 0

	.code	32
	.globl _arm_return
_arm_return:
	RETLDM
	.code	16

.macro interwork register
	.code	16

	THUMB_FUNC_START _interwork_call_via_\register

	bx	pc
	nop

	.code	32
	.globl LSYM(Lchange_\register)
LSYM(Lchange_\register):
	tst	\register, #1
	streq	lr, [sp, #-4]!
	adreq	lr, _arm_return
	bx	\register

	SIZE	(_interwork_call_via_\register)
.endm
	interwork r0
	interwork r1
	interwork r2
	interwork r3
	interwork r4
	interwork r5
	interwork r6
	interwork r7
	interwork r8
	interwork r9
	interwork sl
	interwork fp
	interwork ip
	interwork sp

	/* The LR case has to be handled a little differently...  */
	.code	16

	THUMB_FUNC_START _interwork_call_via_lr

	bx	pc
	nop

	.code	32
	.globl .Lchange_lr
.Lchange_lr:
	tst	lr, #1
	stmeqdb	r13!, {lr}
	mov	ip, lr
	adreq	lr, _arm_return
	bx	ip

	SIZE	(_interwork_call_via_lr)

#endif /* L_interwork_call_via_rX */

#include "ieee754-df.S"
#include "ieee754-sf.S"