1 /* -*- Mode: Asm -*- */
2 /* Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
3 Contributed by Denis Chertykov <denisc@overta.ru>
5 This file is free software; you can redistribute it and/or modify it
6 under the terms of the GNU General Public License as published by the
7 Free Software Foundation; either version 2, or (at your option) any later version.
10 In addition to the permissions in the GNU General Public License, the
11 Free Software Foundation gives you unlimited permission to link the
12 compiled version of this file into combinations with other programs,
13 and to distribute those combinations without any restriction coming
14 from the use of this file. (The General Public License restrictions
15 do apply in other respects; for example, they cover modification of
16 the file, and distribution when not linked into a combined executable.)
19 This file is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA. */
/* GCC/AVR register conventions used throughout this file: r1 is the
   fixed zero register and r0 is the scratch (temporary) register.
   Both may be clobbered freely as long as r1 is zero again on return.  */
29 #define __zero_reg__ r1
30 #define __tmp_reg__ r0
35 /* Most of the functions here are called directly from avr.md
36 patterns, instead of using the standard libcall mechanisms.
37 This can make better code because GCC knows exactly which
38 of the call-used registers (not all of them) are clobbered. */
/* Everything below lives in its own section so the linker can drop
   routines that are never referenced.  */
40 .section .text.libgcc, "ax", @progbits
/* Move the low byte of a 16-bit value.  On the enhanced core a single
   MOVW copies the whole register pair, so the matching mov_h expands
   to nothing there.  NOTE(review): body restored — the original macro
   body and .endm were lost in this extraction.  */
.macro	mov_l  r_dest, r_src
#if defined (__AVR_ENHANCED__)
	movw	\r_dest, \r_src		; copies the whole pair at once
#else
	mov	\r_dest, \r_src
#endif
.endm
/* Move the high byte of a 16-bit value.  Empty on the enhanced core
   because mov_l's MOVW already moved both bytes of the pair.
   NOTE(review): body restored — the original macro body and .endm
   were lost in this extraction.  */
.macro	mov_h  r_dest, r_src
#if defined (__AVR_ENHANCED__)
; empty — high byte was moved by mov_l's movw
#else
	mov	\r_dest, \r_src
#endif
.endm
58 /* Note: mulqi3, mulhi3 are open-coded on the enhanced core. */
59 #if !defined (__AVR_ENHANCED__)
60 /*******************************************************
		Multiplication  8 x 8
62 *******************************************************/
#if defined (L_mulqi3)

#define	r_arg2	r22		/* multiplicand */
#define	r_arg1	r24		/* multiplier */
#define r_res	__tmp_reg__	/* result */

/* 8 x 8 -> 8 shift-and-add multiply for cores without MUL.
   In:  r24 = multiplier, r22 = multiplicand.
   Out: r24 = low 8 bits of the product.  Clobbers r0, r22, flags.
   NOTE(review): labels and skipped instructions restored — this
   extraction had dropped them.  */
	.global	__mulqi3
	.func	__mulqi3
__mulqi3:
	clr	r_res		; clear result
__mulqi3_loop:
	sbrc	r_arg1,0	; if multiplier LSB set,
	add	r_res,r_arg2	; result += multiplicand
	add	r_arg2,r_arg2	; shift multiplicand
	breq	__mulqi3_exit	; while multiplicand != 0
	lsr	r_arg1		; next multiplier bit
	brne	__mulqi3_loop	; exit if multiplier = 0
__mulqi3_exit:
	mov	r_arg1,r_res	; result to return register
	ret

#undef r_arg2
#undef r_arg1
#undef r_res

	.endfunc
#endif /* defined (L_mulqi3) */
#if defined (L_mulqihi3)
/* Signed 8 x 8 -> 16 widening multiply (classic core): sign-extend
   both 8-bit operands to 16 bits and tail-jump to __mulhi3.
   NOTE(review): body restored — it was lost in this extraction.  */
	.global	__mulqihi3
	.func	__mulqihi3
__mulqihi3:
	clr	r25		; sign-extend r24 into r25
	sbrc	r24, 7
	dec	r25		; r25 = 0xff if r24 negative
	clr	r23		; sign-extend r22 into r23
	sbrc	r22, 7
	dec	r23
	rjmp	__mulhi3
	.endfunc
#endif /* defined (L_mulqihi3) */
#if defined (L_umulqihi3)
/* Unsigned 8 x 8 -> 16 widening multiply (classic core): zero-extend
   both 8-bit operands and tail-jump to __mulhi3.
   NOTE(review): body restored — it was lost in this extraction.  */
	.global	__umulqihi3
	.func	__umulqihi3
__umulqihi3:
	clr	r25		; zero-extend r24
	clr	r23		; zero-extend r22
	rjmp	__mulhi3
	.endfunc
#endif /* defined (L_umulqihi3) */
115 /*******************************************************
116 Multiplication 16 x 16
117 *******************************************************/
#if defined (L_mulhi3)
#define	r_arg1L	r24		/* multiplier Low */
#define	r_arg1H	r25		/* multiplier High */
#define	r_arg2L	r22		/* multiplicand Low */
#define	r_arg2H	r23		/* multiplicand High */
#define r_resL	__tmp_reg__	/* result Low */
#define r_resH	r21		/* result High */

/* 16 x 16 -> 16 shift-and-add multiply for cores without MUL.
   In:  r25:r24 = multiplier, r23:r22 = multiplicand.
   Out: r25:r24 = low 16 bits of the product.
   Clobbers r0, r21, r22, r23, flags.
   NOTE(review): loop/skip/exit labels and the second bytes of the
   16-bit operations restored — this extraction had dropped them.  */
	.global	__mulhi3
	.func	__mulhi3
__mulhi3:
	clr	r_resH		; clear result
	clr	r_resL		; clear result
__mulhi3_loop:
	sbrs	r_arg1L,0	; skip add unless multiplier LSB set
	rjmp	__mulhi3_skip1
	add	r_resL,r_arg2L	; result + multiplicand
	adc	r_resH,r_arg2H
__mulhi3_skip1:
	add	r_arg2L,r_arg2L	; shift multiplicand
	adc	r_arg2H,r_arg2H

	cp	r_arg2L,__zero_reg__
	cpc	r_arg2H,__zero_reg__
	breq	__mulhi3_exit	; while multiplicand != 0

	lsr	r_arg1H		; gets LSB of multiplier
	ror	r_arg1L
	sbiw	r_arg1L,0
	brne	__mulhi3_loop	; exit if multiplier = 0
__mulhi3_exit:
	mov	r_arg1H,r_resH	; result to return register
	mov	r_arg1L,r_resL
	ret

#undef r_arg1L
#undef r_arg1H
#undef r_arg2L
#undef r_arg2H
#undef r_resL
#undef r_resH

	.endfunc
#endif /* defined (L_mulhi3) */
162 #endif /* !defined (__AVR_ENHANCED__) */
#if defined (L_mulhisi3)
/* Signed 16 x 16 -> 32 widening multiply: move one operand into the
   __mulsi3 arg2 slot, sign-extend both to 32 bits, tail-jump to
   __mulsi3 (multiplication commutes, so operand order is free).
   NOTE(review): body restored from the original file — it was lost in
   this extraction; verify against upstream libgcc.S.  */
	.global	__mulhisi3
	.func	__mulhisi3
__mulhisi3:
	mov_l	r18, r24	; 16-bit operand -> arg2 low word
	mov_h	r19, r25
	clr	r24		; sign-extend r23:r22 into r25:r24
	sbrc	r23, 7
	dec	r24
	mov	r25, r24
	clr	r20		; sign-extend r19:r18 into r21:r20
	sbrc	r19, 7
	dec	r20
	mov	r21, r20
	rjmp	__mulsi3
	.endfunc
#endif /* defined (L_mulhisi3) */
#if defined (L_umulhisi3)
/* Unsigned 16 x 16 -> 32 widening multiply: zero-extend both operands
   to 32 bits and tail-jump to __mulsi3.
   NOTE(review): body restored — it was lost in this extraction.  */
	.global	__umulhisi3
	.func	__umulhisi3
__umulhisi3:
	mov_l	r18, r24	; 16-bit operand -> arg2 low word
	mov_h	r19, r25
	clr	r24		; zero-extend both operands
	clr	r25
	clr	r20
	clr	r21
	rjmp	__mulsi3
	.endfunc
#endif /* defined (L_umulhisi3) */
196 #if defined (L_mulsi3)
197 /*******************************************************
198 Multiplication 32 x 32
199 *******************************************************/
/* NOTE(review): this listing is incomplete — gaps in the embedded line
   numbering show that several #defines (middle bytes), labels, movw/add
   pairs and the classic-core loop body of __mulsi3 are missing.
   Restore them from the original libgcc.S before assembling.  */
200 #define r_arg1L r22 /* multiplier Low */
203 #define r_arg1HH r25 /* multiplier High */
206 #define r_arg2L r18 /* multiplicand Low */
209 #define r_arg2HH r21 /* multiplicand High */
211 #define r_resL r26 /* result Low */
214 #define r_resHH r31 /* result High */
/* Enhanced-core path: assemble the 32-bit product from 8x8 hardware
   "mul" partial products; each mul leaves its 16-bit result in r1:r0.  */
220 #if defined (__AVR_ENHANCED__)
225 mul r_arg1HL, r_arg2L
228 mul r_arg1L, r_arg2HL
231 mul r_arg1HH, r_arg2L
233 mul r_arg1HL, r_arg2H
235 mul r_arg1H, r_arg2HL
237 mul r_arg1L, r_arg2HH
239 clr r_arg1HH ; use instead of __zero_reg__ to add carry
243 adc r_resHH, r_arg1HH ; add carry
247 adc r_resHH, r_arg1HH ; add carry
249 movw r_arg1HL, r_resHL
250 clr r1 ; __zero_reg__ clobbered by "mul"
/* Classic-core path: 32-bit shift-and-add loop, same scheme as
   __mulhi3 above but four bytes wide.  */
253 clr r_resHH ; clear result
254 clr r_resHL ; clear result
255 clr r_resH ; clear result
256 clr r_resL ; clear result
260 add r_resL,r_arg2L ; result + multiplicand
265 add r_arg2L,r_arg2L ; shift multiplicand
267 adc r_arg2HL,r_arg2HL
268 adc r_arg2HH,r_arg2HH
270 lsr r_arg1HH ; gets LSB of multiplier
277 brne __mulsi3_loop ; exit if multiplier = 0
279 mov r_arg1HH,r_resHH ; result to return register
284 #endif /* !defined (__AVR_ENHANCED__) */
302 #endif /* defined (L_mulsi3) */
304 /*******************************************************
305 Division 8 / 8 => (result + remainder)
306 *******************************************************/
/* Register map shared by __udivmodqi4 and __divmodqi4 below.  */
307 #define r_rem r25 /* remainder */
308 #define r_arg1 r24 /* dividend, quotient */
309 #define r_arg2 r22 /* divisor */
310 #define r_cnt r23 /* loop count */
#if defined (L_udivmodqi4)
/* Unsigned 8/8 division.  In: r24 = dividend, r22 = divisor.
   Out: r24 = quotient, r25 = remainder.  Clobbers r23, flags.
   Restoring division: the quotient bits accumulate inverted in the
   carry and are complemented at the end.
   NOTE(review): entry label and loop/entry-point labels restored —
   this extraction had dropped them.  */
	.global	__udivmodqi4
	.func	__udivmodqi4
__udivmodqi4:
	sub	r_rem,r_rem	; clear remainder and carry
	ldi	r_cnt,9		; init loop counter
	rjmp	__udivmodqi4_ep	; jump to entry point
__udivmodqi4_loop:
	rol	r_rem		; shift dividend into remainder
	cp	r_rem,r_arg2	; compare remainder & divisor
	brcs	__udivmodqi4_ep	; remainder <= divisor
	sub	r_rem,r_arg2	; restore remainder
__udivmodqi4_ep:
	rol	r_arg1		; shift dividend (with CARRY)
	dec	r_cnt		; decrement loop counter
	brne	__udivmodqi4_loop
	com	r_arg1		; complement result
				; because C flag was complemented in loop
	ret
	.endfunc
#endif /* defined (L_udivmodqi4) */
#if defined (L_divmodqi4)
/* Signed 8/8 division.  In: r24 = dividend, r22 = divisor.
   Out: r24 = quotient, r25 = remainder.
   T flag holds the dividend sign (remainder takes the dividend's
   sign); r0 bit 7 holds the quotient sign.
   NOTE(review): labels and the sbrc guards restored — this extraction
   had dropped them.  */
	.global	__divmodqi4
	.func	__divmodqi4
__divmodqi4:
	bst	r_arg1,7	; store sign of dividend
	mov	__tmp_reg__,r_arg1
	eor	__tmp_reg__,r_arg2; r0.7 is sign of result
	sbrc	r_arg1,7
	neg	r_arg1		; dividend negative : negate
	sbrc	r_arg2,7
	neg	r_arg2		; divisor negative : negate
	rcall	__udivmodqi4	; do the unsigned div/mod
	brtc	__divmodqi4_1	; dividend was non-negative
	neg	r_rem		; correct remainder sign
__divmodqi4_1:
	sbrc	__tmp_reg__,7
	neg	r_arg1		; correct result sign
__divmodqi4_exit:
	ret
	.endfunc
#endif /* defined (L_divmodqi4) */
362 /*******************************************************
363 Division 16 / 16 => (result + remainder)
364 *******************************************************/
/* Register map shared by __udivmodhi4 and __divmodhi4 below.  */
365 #define r_remL r26 /* remainder Low */
366 #define r_remH r27 /* remainder High */
368 /* return: remainder */
369 #define r_arg1L r24 /* dividend Low */
370 #define r_arg1H r25 /* dividend High */
372 /* return: quotient */
373 #define r_arg2L r22 /* divisor Low */
374 #define r_arg2H r23 /* divisor High */
376 #define r_cnt r21 /* loop count */
#if defined (L_udivmodhi4)
/* Unsigned 16/16 division.  In: r25:r24 = dividend, r23:r22 = divisor.
   Out: r23:r22 = quotient, r25:r24 = remainder (div() layout).
   Clobbers r21, r26, r27, flags.
   NOTE(review): labels and the high-byte halves of the 16-bit
   operations restored — this extraction had dropped them.  */
	.global	__udivmodhi4
	.func	__udivmodhi4
__udivmodhi4:
	sub	r_remL,r_remL
	sub	r_remH,r_remH	; clear remainder and carry
	ldi	r_cnt,17	; init loop counter
	rjmp	__udivmodhi4_ep	; jump to entry point
__udivmodhi4_loop:
	rol	r_remL		; shift dividend into remainder
	rol	r_remH
	cp	r_remL,r_arg2L	; compare remainder & divisor
	cpc	r_remH,r_arg2H
	brcs	__udivmodhi4_ep	; remainder < divisor
	sub	r_remL,r_arg2L	; restore remainder
	sbc	r_remH,r_arg2H
__udivmodhi4_ep:
	rol	r_arg1L		; shift dividend (with CARRY)
	rol	r_arg1H
	dec	r_cnt		; decrement loop counter
	brne	__udivmodhi4_loop
	com	r_arg1L		; complement quotient
	com	r_arg1H		; (C flag was complemented in loop)
; div/mod results to return registers, as for the div() function
	mov_l	r_arg2L, r_arg1L	; quotient
	mov_h	r_arg2H, r_arg1H
	mov_l	r_arg1L, r_remL		; remainder
	mov_h	r_arg1H, r_remH
	ret
	.endfunc
#endif /* defined (L_udivmodhi4) */
#if defined (L_divmodhi4)
/* Signed 16/16 division.  In: r25:r24 = dividend, r23:r22 = divisor.
   Out: r23:r22 = quotient, r25:r24 = remainder (div() layout).
   T flag = dividend sign (remainder sign); r0 bit 7 = quotient sign.
   The _neg1 helper tests T itself, so it is rcall'd unconditionally.
   NOTE(review): labels, sbrc/tst guards and the 16-bit negate
   sequences restored — this extraction had dropped them.  */
	.global	__divmodhi4
	.func	__divmodhi4
__divmodhi4:
	bst	r_arg1H,7	; store sign of dividend
	mov	__tmp_reg__,r_arg1H
	eor	__tmp_reg__,r_arg2H   ; r0.7 is sign of result
	rcall	__divmodhi4_neg1	; dividend negative : negate
	sbrc	r_arg2H,7
	rcall	__divmodhi4_neg2	; divisor negative : negate
	rcall	__udivmodhi4	; do the unsigned div/mod
	rcall	__divmodhi4_neg1	; correct remainder sign
	tst	__tmp_reg__
	brpl	__divmodhi4_exit
__divmodhi4_neg2:
	com	r_arg2H
	neg	r_arg2L		; correct divisor/result sign
	sbci	r_arg2H,0xff	; propagate borrow of 16-bit negate
__divmodhi4_exit:
	ret
__divmodhi4_neg1:
	brtc	__divmodhi4_exit	; nothing to do if T clear
	com	r_arg1H
	neg	r_arg1L		; correct dividend/remainder sign
	sbci	r_arg1H,0xff
	ret
	.endfunc
#endif /* defined (L_divmodhi4) */
452 /*******************************************************
453 Division 32 / 32 => (result + remainder)
454 *******************************************************/
/* Register map shared by __udivmodsi4 and __divmodsi4 below.
   NOTE(review): the middle-byte defines (r_remH/r_remHL, r_arg1H/
   r_arg1HL, r_arg2H/r_arg2HL) are missing from this extraction —
   restore them from the original file.  */
455 #define r_remHH r31 /* remainder High */
458 #define r_remL r26 /* remainder Low */
460 /* return: remainder */
461 #define r_arg1HH r25 /* dividend High */
464 #define r_arg1L r22 /* dividend Low */
466 /* return: quotient */
467 #define r_arg2HH r21 /* divisor High */
470 #define r_arg2L r18 /* divisor Low */
472 #define r_cnt __zero_reg__ /* loop count (0 after the loop!) */
#if defined (L_udivmodsi4)
/* Unsigned 32/32 division.  In: r25..r22 = dividend, r21..r18 = divisor.
   Out: r21..r18 = quotient, r25..r22 = remainder (ldiv() layout).
   __zero_reg__ (r1) is used as the loop counter; it is zero again
   when the loop exits, so the ABI invariant is preserved.
   NOTE(review): labels and the upper-byte halves of the 32-bit
   operations restored — this extraction had dropped them.  */
	.global	__udivmodsi4
	.func	__udivmodsi4
__udivmodsi4:
	ldi	r_remL, 33	; init loop counter
	mov	r_cnt, r_remL	; counter lives in __zero_reg__
	sub	r_remL,r_remL
	sub	r_remH,r_remH	; clear remainder and carry
	mov_l	r_remHL, r_remL
	mov_h	r_remHH, r_remH
	rjmp	__udivmodsi4_ep	; jump to entry point
__udivmodsi4_loop:
	rol	r_remL		; shift dividend into remainder
	rol	r_remH
	rol	r_remHL
	rol	r_remHH
	cp	r_remL,r_arg2L	; compare remainder & divisor
	cpc	r_remH,r_arg2H
	cpc	r_remHL,r_arg2HL
	cpc	r_remHH,r_arg2HH
	brcs	__udivmodsi4_ep	; remainder <= divisor
	sub	r_remL,r_arg2L	; restore remainder
	sbc	r_remH,r_arg2H
	sbc	r_remHL,r_arg2HL
	sbc	r_remHH,r_arg2HH
__udivmodsi4_ep:
	rol	r_arg1L		; shift dividend (with CARRY)
	rol	r_arg1H
	rol	r_arg1HL
	rol	r_arg1HH
	dec	r_cnt		; decrement loop counter
	brne	__udivmodsi4_loop
				; __zero_reg__ now restored (r_cnt == 0)
	com	r_arg1L		; complement quotient
	com	r_arg1H		; (C flag was complemented in loop)
	com	r_arg1HL
	com	r_arg1HH
; div/mod results to return registers, as for the ldiv() function
	mov_l	r_arg2L, r_arg1L	; quotient
	mov_h	r_arg2H, r_arg1H
	mov_l	r_arg2HL, r_arg1HL
	mov_h	r_arg2HH, r_arg1HH
	mov_l	r_arg1L, r_remL		; remainder
	mov_h	r_arg1H, r_remH
	mov_l	r_arg1HL, r_remHL
	mov_h	r_arg1HH, r_remHH
	ret
	.endfunc
#endif /* defined (L_udivmodsi4) */
#if defined (L_divmodsi4)
/* Signed 32/32 division.  In: r25..r22 = dividend, r21..r18 = divisor.
   Out: r21..r18 = quotient, r25..r22 = remainder (ldiv() layout).
   T flag = dividend sign (remainder sign); r0 bit 7 = quotient sign.
   The _neg1 helper tests T itself, so it is rcall'd unconditionally.
   NOTE(review): labels, guards and the 32-bit negate sequences
   restored — this extraction had dropped them.  */
	.global	__divmodsi4
	.func	__divmodsi4
__divmodsi4:
	bst	r_arg1HH,7	; store sign of dividend
	mov	__tmp_reg__,r_arg1HH
	eor	__tmp_reg__,r_arg2HH   ; r0.7 is sign of result
	rcall	__divmodsi4_neg1	; dividend negative : negate
	sbrc	r_arg2HH,7
	rcall	__divmodsi4_neg2	; divisor negative : negate
	rcall	__udivmodsi4	; do the unsigned div/mod
	rcall	__divmodsi4_neg1	; correct remainder sign
	rol	__tmp_reg__	; result sign -> carry
	brcc	__divmodsi4_exit
__divmodsi4_neg2:
	com	r_arg2HH
	com	r_arg2HL
	com	r_arg2H
	neg	r_arg2L		; correct divisor/quotient sign
	sbci	r_arg2H,0xff	; propagate borrow of 32-bit negate
	sbci	r_arg2HL,0xff
	sbci	r_arg2HH,0xff
__divmodsi4_exit:
	ret
__divmodsi4_neg1:
	brtc	__divmodsi4_exit	; nothing to do if T clear
	com	r_arg1HH
	com	r_arg1HL
	com	r_arg1H
	neg	r_arg1L		; correct dividend/remainder sign
	sbci	r_arg1H,0xff
	sbci	r_arg1HL,0xff
	sbci	r_arg1HH,0xff
	ret
	.endfunc
#endif /* defined (L_divmodsi4) */
/**********************************
 * This is a prologue subroutine
 **********************************/
#if defined (L_prologue)

/* Shared function prologue: pushes all call-saved registers, then
   allocates a frame of r27:r26 bytes and returns to the caller via
   ijmp (Z set up by the compiler-emitted call sequence).
   Interrupts are disabled only between the two SP writes; SREG (and
   thus the caller's I flag) is restored in between.
   NOTE(review): the push/ldd sequences were missing from this
   extraction and have been restored — verify against upstream.  */
	.global	__prologue_saves__
	.func	__prologue_saves__
__prologue_saves__:
	push	r2
	push	r3
	push	r4
	push	r5
	push	r6
	push	r7
	push	r8
	push	r9
	push	r10
	push	r11
	push	r12
	push	r13
	push	r14
	push	r15
	push	r16
	push	r17
	push	r28
	push	r29
	in	r28,__SP_L__
	in	r29,__SP_H__
	sub	r28,r26		; allocate frame: SP -= size (r27:r26)
	sbc	r29,r27
	in	__tmp_reg__,__SREG__
	cli			; atomically update the 16-bit SP
	out	__SP_H__,r29
	out	__SREG__,__tmp_reg__	; restore I flag between SP writes
	out	__SP_L__,r28
	ijmp			; return via Z
	.endfunc
#endif /* defined (L_prologue) */
/**********************************
 * This is an epilogue subroutine
 **********************************/
#if defined (L_epilogue)

/* Shared function epilogue: reloads all call-saved registers from the
   frame (Y-relative), deallocates a frame of r30 bytes and returns.
   Interrupts are disabled only between the two SP writes; SREG (and
   thus the caller's I flag) is restored in between.
   NOTE(review): the ldd sequence was missing from this extraction and
   has been restored — verify against upstream.  */
	.global	__epilogue_restores__
	.func	__epilogue_restores__
__epilogue_restores__:
	ldd	r2,Y+18
	ldd	r3,Y+17
	ldd	r4,Y+16
	ldd	r5,Y+15
	ldd	r6,Y+14
	ldd	r7,Y+13
	ldd	r8,Y+12
	ldd	r9,Y+11
	ldd	r10,Y+10
	ldd	r11,Y+9
	ldd	r12,Y+8
	ldd	r13,Y+7
	ldd	r14,Y+6
	ldd	r15,Y+5
	ldd	r16,Y+4
	ldd	r17,Y+3
	ldd	r26,Y+2		; caller's saved Y -> X for now
	ldd	r27,Y+1
	add	r28,r30		; deallocate frame: SP = Y + size (r30)
	adc	r29,__zero_reg__
	in	__tmp_reg__,__SREG__
	cli			; atomically update the 16-bit SP
	out	__SP_H__,r29
	out	__SREG__,__tmp_reg__	; restore I flag between SP writes
	out	__SP_L__,r28
	mov_l	r28, r26	; restore caller's frame pointer
	mov_h	r29, r27
	ret
	.endfunc
#endif /* defined (L_epilogue) */
640 .section .fini9,"ax",@progbits
/* NOTE(review): the _exit entry point that belongs in .fini9 (global
   label plus weak "exit" alias, per the usual libgcc layout) is
   missing from this extraction — restore from the original file.  */
647 /* Code from .fini8 ... .fini1 sections inserted by ld script. */
649 .section .fini0,"ax",@progbits
/* NOTE(review): the .fini0 stop-program code (presumably an infinite
   loop) is missing here — restore from the original file.  */
653 #endif /* defined (L_exit) */
/* NOTE(review): the whole L_cleanup body between these guards is
   missing from this extraction.  */
661 #endif /* defined (L_cleanup) */
664 .global __tablejump2__
/* NOTE(review): the __tablejump2__ body (presumably converts the word
   address in Z to a byte address before falling through) is missing
   from this extraction — restore from the original file.  */
669 .global __tablejump__
671 #if defined (__AVR_ENHANCED__)
/* NOTE(review): both the enhanced-core and classic-core dispatch
   branches of __tablejump__ are missing here — restore from the
   original file.  */
685 #endif /* defined (L_tablejump) */
/* __do_copy_data is only necessary if there is anything in .data section.
   Does not use RAMPZ - crt*.o provides a replacement for >64K devices.
   Copies .data's initial values from flash (Z) to RAM (X), stopping at
   __data_end; runs from .init4 before main().
   NOTE(review): labels and loop body restored — this extraction had
   dropped them.  */

#if defined (L_copy_data)
	.section .init4,"ax",@progbits
	.global	__do_copy_data
__do_copy_data:
	ldi	r17, hi8(__data_end)
	ldi	r26, lo8(__data_start)
	ldi	r27, hi8(__data_start)
	ldi	r30, lo8(__data_load_start)
	ldi	r31, hi8(__data_load_start)
	rjmp	.do_copy_data_start
.do_copy_data_loop:
#if defined (__AVR_ENHANCED__)
	lpm	r0, Z+		; load from flash, post-increment Z
#else
	lpm			; r0 = flash[Z]
	adiw	r30, 1
#endif
	st	X+, r0		; store to RAM, post-increment X
.do_copy_data_start:
	cpi	r26, lo8(__data_end)
	cpc	r27, r17
	brne	.do_copy_data_loop
#endif /* L_copy_data */
/* __do_clear_bss is only necessary if there is anything in .bss section.
   Zero-fills [__bss_start, __bss_end) through X; runs from .init4
   before main().
   NOTE(review): labels and loop body restored — this extraction had
   dropped them.  */

#if defined (L_clear_bss)
	.section .init4,"ax",@progbits
	.global	__do_clear_bss
__do_clear_bss:
	ldi	r17, hi8(__bss_end)
	ldi	r26, lo8(__bss_start)
	ldi	r27, hi8(__bss_start)
	rjmp	.do_clear_bss_start
.do_clear_bss_loop:
	st	X+, __zero_reg__	; zero one byte, post-increment X
.do_clear_bss_start:
	cpi	r26, lo8(__bss_end)
	cpc	r27, r17
	brne	.do_clear_bss_loop
#endif /* L_clear_bss */
/* __do_global_ctors and __do_global_dtors are only necessary
   if there are any constructors/destructors. */

/* On large-flash (MEGA) devices rcall/rjmp cannot reach the whole
   address space, so use the long call/jmp forms there.
   NOTE(review): the #define bodies were missing from this extraction
   and have been restored.  */
#if defined (__AVR_MEGA__)
#define XCALL call
#define XJMP  jmp
#else
#define XCALL rcall
#define XJMP  rjmp
#endif
#if defined (L_ctors)
	.section .init6,"ax",@progbits
	.global	__do_global_ctors
/* Walk the constructor table backwards from __ctors_end to
   __ctors_start, dispatching each 2-byte entry through __tablejump__
   (Z = word address).  r17/r28/r29 are call-saved, so they survive
   the constructor calls.
   NOTE(review): labels and loop body restored — this extraction had
   dropped them.  */
__do_global_ctors:
	ldi	r17, hi8(__ctors_start)
	ldi	r28, lo8(__ctors_end)
	ldi	r29, hi8(__ctors_end)
	rjmp	.do_global_ctors_start
.do_global_ctors_loop:
	sbiw	r28, 2		; step back one table entry
	mov_h	r31, r29
	mov_l	r30, r28
	XCALL	__tablejump__
.do_global_ctors_start:
	cpi	r28, lo8(__ctors_start)
	cpc	r29, r17
	brne	.do_global_ctors_loop
#endif /* L_ctors */
#if defined (L_dtors)
	.section .fini6,"ax",@progbits
	.global	__do_global_dtors
/* Walk the destructor table forwards from __dtors_start to
   __dtors_end, dispatching each 2-byte entry through __tablejump__
   (Z = word address).  r17/r28/r29 are call-saved, so they survive
   the destructor calls.
   NOTE(review): labels and loop body restored — this extraction had
   dropped them.  */
__do_global_dtors:
	ldi	r17, hi8(__dtors_end)
	ldi	r28, lo8(__dtors_start)
	ldi	r29, hi8(__dtors_start)
	rjmp	.do_global_dtors_start
.do_global_dtors_loop:
	mov_h	r31, r29
	mov_l	r30, r28
	XCALL	__tablejump__
	adiw	r28, 2		; advance to the next table entry
.do_global_dtors_start:
	cpi	r28, lo8(__dtors_end)
	cpc	r29, r17
	brne	.do_global_dtors_loop
#endif /* L_dtors */