/*
 *  ARMv4 code generator for TCC
 *
 *  Copyright (c) 2003 Daniel Glöckner
 *  Copyright (c) 2012 Thomas Preud'homme
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef TARGET_DEFS_ONLY

#ifndef TCC_ARM_VFP /* Avoid useless warning */

/* number of available registers */

#ifndef TCC_ARM_VERSION
# define TCC_ARM_VERSION 5
#endif
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which makes
   assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */

#define RC_IRET    RC_R0  /* function return: integer register */
#define RC_LRET    RC_R1  /* function return: second integer register */
#define RC_FRET    RC_F0  /* function return: float register */
/* pretty names for the registers */

#define T2CPR(t) (((t) & VT_BTYPE) != VT_FLOAT ? 0x100 : 0)
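/* Illustrative note, not part of the original source: VFP single-precision
   opcodes use coprocessor 10 (an 0xA nibble in the encoding) and
   double-precision opcodes use coprocessor 11 (0xB).  T2CPR(t) therefore
   yields 0x100 for non-float (i.e. double) operands, which, or-ed into a
   single-precision opcode, flips that nibble: e.g. 0xED100A00 (flds)
   becomes 0xED100B00 (fldd), as done in load()/store() below. */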
/* return registers for function */
#define REG_IRET TREG_R0 /* single word int return register */
#define REG_LRET TREG_R1 /* second word return register (for long long) */
#define REG_FRET TREG_F0 /* float return register */

#define TOK___divdi3 TOK___aeabi_ldivmod
#define TOK___moddi3 TOK___aeabi_ldivmod
#define TOK___udivdi3 TOK___aeabi_uldivmod
#define TOK___umoddi3 TOK___aeabi_uldivmod
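/* Illustrative note, not part of the original source: under the ARM EABI the
   64-bit division/modulo helpers are provided by the run-time ABI as
   __aeabi_ldivmod and __aeabi_uldivmod, which return the quotient in r0-r1
   and the remainder in r2-r3, so the four generic helper names above can all
   be mapped onto these two entry points. */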
/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* defined if structures are passed as pointers. Otherwise structures
   are directly pushed on stack. */
/* #define FUNC_STRUCT_PARAM_AS_PTR */

/* pointer size, in bytes */

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE 8

#define LDOUBLE_SIZE 8

#define LDOUBLE_ALIGN 8

#define LDOUBLE_ALIGN 4

/* maximum alignment (for aligned attribute support) */

#define CHAR_IS_UNSIGNED
/******************************************************/

#define EM_TCC_TARGET EM_ARM

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_ARM_ABS32
#define R_DATA_PTR  R_ARM_ABS32
#define R_JMP_SLOT  R_ARM_JUMP_SLOT
#define R_COPY      R_ARM_COPY

#define ELF_START_ADDR 0x00008000
#define ELF_PAGE_SIZE  0x1000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
ST_DATA const int reg_classes[NB_REGS] = {
    /* r0  */ RC_INT | RC_R0,
    /* r1  */ RC_INT | RC_R1,
    /* r2  */ RC_INT | RC_R2,
    /* r3  */ RC_INT | RC_R3,
    /* r12 */ RC_INT | RC_R12,
    /* f0  */ RC_FLOAT | RC_F0,
    /* f1  */ RC_FLOAT | RC_F1,
    /* f2  */ RC_FLOAT | RC_F2,
    /* f3  */ RC_FLOAT | RC_F3,
    /* d4/s8  */ RC_FLOAT | RC_F4,
    /* d5/s10 */ RC_FLOAT | RC_F5,
    /* d6/s12 */ RC_FLOAT | RC_F6,
    /* d7/s14 */ RC_FLOAT | RC_F7,
};
static int func_sub_sp_offset, last_itod_magic;
#if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
static CType float_type, double_type, func_float_type, func_double_type;
ST_FUNC void arm_init_types(void)
{
  float_type.t = VT_FLOAT;
  double_type.t = VT_DOUBLE;
  func_float_type.t = VT_FUNC;
  func_float_type.ref = sym_push(SYM_FIELD, &float_type, FUNC_CDECL, FUNC_OLD);
  func_double_type.t = VT_FUNC;
  func_double_type.ref = sym_push(SYM_FIELD, &double_type, FUNC_CDECL, FUNC_OLD);
}
#else
#define func_float_type func_old_type
#define func_double_type func_old_type
#define func_ldouble_type func_old_type
ST_FUNC void arm_init_types(void) {}
#endif
static int two2mask(int a, int b) {
  return (reg_classes[a] | reg_classes[b]) & ~(RC_INT | RC_FLOAT);
}

static int regmask(int r) {
  return reg_classes[r] & ~(RC_INT | RC_FLOAT);
}
/******************************************************/

void o(uint32_t i)
{
  /* this is a good place to start adding big-endian support */
  int ind1;

  ind1 = ind + 4;
  if (!cur_text_section)
    tcc_error("compiler error! This happens e.g. if the compiler\n"
              "can't evaluate constant expressions outside of a function.");
  if (ind1 > cur_text_section->data_allocated)
    section_realloc(cur_text_section, ind1);
  cur_text_section->data[ind++] = i & 255;
  i >>= 8;
  cur_text_section->data[ind++] = i & 255;
  i >>= 8;
  cur_text_section->data[ind++] = i & 255;
  i >>= 8;
  cur_text_section->data[ind++] = i;
}
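/* Illustrative note, not part of the original source: an ARM data-processing
   immediate ("operand 2") is an 8-bit value rotated right by an even amount
   between 0 and 30.  0xFF000000, for instance, is 0xFF rotated right by 8 and
   is encodable, while 0x101 needs 9 significant bits and is not; stuff_const()
   below returns 0 for such constants and its callers then fall back to
   stuff_const_harder() or to a literal-pool load. */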
static uint32_t stuff_const(uint32_t op, uint32_t c)
{
  uint32_t nc = 0, negop = 0;

  case 0x1A00000: //mov
  case 0x1E00000: //mvn
    return (op&0xF010F000)|((op>>16)&0xF)|0x1E00000;
    return (op&0xF010F000)|((op>>16)&0xF)|0x1A00000;
  case 0x1C00000: //bic
  case 0x1800000: //orr
    return (op&0xFFF0FFFF)|0x1E00000;
  if(c<256) /* catch undefined <<32 */
    m=(0xff>>i)|(0xff<<(32-i));
    return op|(i<<7)|(c<<i)|(c>>(32-i));
void stuff_const_harder(uint32_t op, uint32_t v) {
  uint32_t a[16], nv, no, o2, n2;

  o2=(op&0xfff0ffff)|((op&0xf000)<<4);
  a[i]=(a[i-1]>>2)|(a[i-1]<<30);
  for(j=i<4?i+12:15;j>=i+4;j--)
    if((v&(a[i]|a[j]))==v) {
      o(stuff_const(op,v&a[i]));
      o(stuff_const(o2,v&a[j]));
  for(j=i<4?i+12:15;j>=i+4;j--)
    if((nv&(a[i]|a[j]))==nv) {
      o(stuff_const(no,nv&a[i]));
      o(stuff_const(n2,nv&a[j]));
  for(k=i<4?i+12:15;k>=j+4;k--)
    if((v&(a[i]|a[j]|a[k]))==v) {
      o(stuff_const(op,v&a[i]));
      o(stuff_const(o2,v&a[j]));
      o(stuff_const(o2,v&a[k]));
  for(k=i<4?i+12:15;k>=j+4;k--)
    if((nv&(a[i]|a[j]|a[k]))==nv) {
      o(stuff_const(no,nv&a[i]));
      o(stuff_const(n2,nv&a[j]));
      o(stuff_const(n2,nv&a[k]));
  o(stuff_const(op,v&a[0]));
  o(stuff_const(o2,v&a[4]));
  o(stuff_const(o2,v&a[8]));
  o(stuff_const(o2,v&a[12]));
}
ST_FUNC uint32_t encbranch(int pos, int addr, int fail)
{
  addr -= pos + 8;
  addr /= 4;
  if(addr >= 0x1000000 || addr < -0x1000000) {
    if(fail)
      tcc_error("FIXME: function bigger than 32MB");
    return 0;
  }
  return 0x0A000000|(addr&0xffffff);
}
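/* Illustrative note, not part of the original source: B/BL instructions hold
   a signed 24-bit word offset measured from the branch address + 8 (the value
   the pc reads as), giving a reach of roughly +/-32MB.  encbranch() returns
   the branch template 0x0A000000 with the offset filled in and the condition
   nibble left clear; callers or in the condition, e.g. 0xE0000000 for an
   unconditional branch. */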
int decbranch(int pos)
{
  x=*(uint32_t *)(cur_text_section->data + pos);
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
  x=(uint32_t *)(cur_text_section->data + t);
    *x=0xE1A00000; // nop
    *x |= encbranch(lt,a,1);
static uint32_t vfpr(int r)
{
  if(r < TREG_F0 || r > TREG_F7)
    tcc_error("compiler error! register %i is not a vfp register", r);

static uint32_t fpr(int r)
{
  if(r < TREG_F0 || r > TREG_F3)
    tcc_error("compiler error! register %i is not an fpa register", r);

static uint32_t intr(int r)
{
  if((r < 0 || r > 4) && r != 14)
    tcc_error("compiler error! register %i is not an int register", r);
static void calcaddr(uint32_t *base, int *off, int *sgn, int maxoff, unsigned shift)
{
  if(*off>maxoff || *off&((1<<shift)-1)) {
    y=stuff_const(x,*off&~maxoff);
    y=stuff_const(x,(*off+maxoff)&~maxoff);
    *off=((*off+maxoff)&~maxoff)-*off;
    stuff_const_harder(x,*off&~maxoff);
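/* Illustrative note, not part of the original source: the immediate offset a
   load/store can carry depends on the form used (up to 4095 for ldr/str, 255
   for the halfword/signed-byte forms, 1020 in steps of 4 for VFP), which is
   what the maxoff/shift arguments describe.  When the requested offset is too
   large or misaligned, calcaddr() folds the part that does not fit into the
   base register with an add/sub (or stuff_const_harder for awkward constants)
   and leaves only the small remainder for the memory instruction itself. */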
static uint32_t mapcc(int cc)
{
    return 0x30000000; /* CC/LO */
    return 0x20000000; /* CS/HS */
    return 0x00000000; /* EQ */
    return 0x10000000; /* NE */
    return 0x90000000; /* LS */
    return 0x80000000; /* HI */
    return 0x40000000; /* MI */
    return 0x50000000; /* PL */
    return 0xB0000000; /* LT */
    return 0xA0000000; /* GE */
    return 0xD0000000; /* LE */
    return 0xC0000000; /* GT */
  tcc_error("unexpected condition code");
  return 0xE0000000; /* AL */
}
static int negcc(int cc)
{
  tcc_error("unexpected condition code");
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
  int v, ft, fc, fr, sign;
    uint32_t base = 0xB; // fp
      v1.r = VT_LOCAL | VT_LVAL;
      load(base=14 /* lr */, &v1);
    } else if(v == VT_CONST) {
    } else if(v < VT_CONST) {
      calcaddr(&base,&fc,&sign,1020,2);
      op=0xED100A00; /* flds */
      if ((ft & VT_BTYPE) != VT_FLOAT)
        op|=0x100;   /* flds -> fldd */
      o(op|(vfpr(r)<<12)|(fc>>2)|(base<<16));
#if LDOUBLE_SIZE == 8
      if ((ft & VT_BTYPE) != VT_FLOAT)
      if ((ft & VT_BTYPE) == VT_DOUBLE)
      else if ((ft & VT_BTYPE) == VT_LDOUBLE)
      o(op|(fpr(r)<<12)|(fc>>2)|(base<<16));
    } else if((ft & (VT_BTYPE|VT_UNSIGNED)) == VT_BYTE
              || (ft & VT_BTYPE) == VT_SHORT) {
      calcaddr(&base,&fc,&sign,255,0);
      if ((ft & VT_BTYPE) == VT_SHORT)
      if ((ft & VT_UNSIGNED) == 0)
      o(op|(intr(r)<<12)|(base<<16)|((fc&0xf0)<<4)|(fc&0xf));
      calcaddr(&base,&fc,&sign,4095,0);
      if ((ft & VT_BTYPE) == VT_BYTE)
      o(op|(intr(r)<<12)|fc|(base<<16));
    op=stuff_const(0xE3A00000|(intr(r)<<12),sv->c.ul);
    if (fr & VT_SYM || !op) {
      o(0xE59F0000|(intr(r)<<12));
      greloc(cur_text_section, sv->sym, ind, R_ARM_ABS32);
  } else if (v == VT_LOCAL) {
    op=stuff_const(0xE28B0000|(intr(r)<<12),sv->c.ul);
    if (fr & VT_SYM || !op) {
      o(0xE59F0000|(intr(r)<<12));
      if(fr & VT_SYM) // needed ?
        greloc(cur_text_section, sv->sym, ind, R_ARM_ABS32);
      o(0xE08B0000|(intr(r)<<12)|intr(r));
  } else if(v == VT_CMP) {
    o(mapcc(sv->c.ul)|0x3A00001|(intr(r)<<12));
    o(mapcc(negcc(sv->c.ul))|0x3A00000|(intr(r)<<12));
  } else if (v == VT_JMP || v == VT_JMPI) {
    o(0xE3A00000|(intr(r)<<12)|t);
    o(0xE3A00000|(intr(r)<<12)|(t^1));
  } else if (v < VT_CONST) {
    o(0xEEB00A40|(vfpr(r)<<12)|vfpr(v)|T2CPR(ft)); /* fcpyX */
    o(0xEE008180|(fpr(r)<<12)|fpr(v));
    o(0xE1A00000|(intr(r)<<12)|intr(v));
  tcc_error("load unimplemented!");
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *sv)
{
  int v, ft, fc, fr, sign;
  if (fr & VT_LVAL || fr == VT_LOCAL) {
  } else if(v == VT_CONST) {
      calcaddr(&base,&fc,&sign,1020,2);
      op=0xED000A00; /* fsts */
      if ((ft & VT_BTYPE) != VT_FLOAT)
        op|=0x100;   /* fsts -> fstd */
      o(op|(vfpr(r)<<12)|(fc>>2)|(base<<16));
#if LDOUBLE_SIZE == 8
      if ((ft & VT_BTYPE) != VT_FLOAT)
      if ((ft & VT_BTYPE) == VT_DOUBLE)
      if ((ft & VT_BTYPE) == VT_LDOUBLE)
      o(op|(fpr(r)<<12)|(fc>>2)|(base<<16));
    } else if((ft & VT_BTYPE) == VT_SHORT) {
      calcaddr(&base,&fc,&sign,255,0);
      o(op|(intr(r)<<12)|(base<<16)|((fc&0xf0)<<4)|(fc&0xf));
      calcaddr(&base,&fc,&sign,4095,0);
      if ((ft & VT_BTYPE) == VT_BYTE)
      o(op|(intr(r)<<12)|fc|(base<<16));
  tcc_error("store unimplemented");
}
static void gadd_sp(int val)
{
  stuff_const_harder(0xE28DD000,val);
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
  if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
    x=encbranch(ind,ind+vtop->c.ul,0);
    if (vtop->r & VT_SYM) {
      /* relocation case */
      greloc(cur_text_section, vtop->sym, ind, R_ARM_PC24);
      put_elf_reloc(symtab_section, cur_text_section, ind, R_ARM_PC24, 0);
      o(x|(is_jmp?0xE0000000:0xE1000000));
      o(0xE28FE004); // add lr,pc,#4
      o(0xE51FF004); // ldr pc,[pc,#-4]
      if (vtop->r & VT_SYM)
        greloc(cur_text_section, vtop->sym, ind, R_ARM_ABS32);
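      /* Illustrative note, not part of the original source: this pair of
         instructions makes a call through an inline literal.  Since the pc
         reads as the current instruction + 8, "add lr,pc,#4" sets lr to the
         address just past the 32-bit word emitted after the ldr, and
         "ldr pc,[pc,#-4]" loads that very word (the callee address, fixed up
         by the R_ARM_ABS32 relocation above) into the pc. */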
    /* otherwise, indirect call */
    o(0xE1A0E00F); // mov lr,pc
    o(0xE1A0F000|intr(r)); // mov pc,r
#ifdef TCC_ARM_HARDFLOAT
static int is_float_hgen_aggr(CType *type)
{
  if ((type->t & VT_BTYPE) == VT_STRUCT) {
    int btype, nb_fields = 0;
    btype = ref->type.t & VT_BTYPE;
    if (btype == VT_FLOAT || btype == VT_DOUBLE) {
      for(; ref && btype == (ref->type.t & VT_BTYPE); ref = ref->next, nb_fields++);
      return !ref && nb_fields <= 4;
    }
  }
}

struct avail_regs {
  /* worst case: f(float, double, 3 float struct, double, 3 float struct, double) */
  signed char avail[3];
  int first_hole;
  int last_hole;
  int first_free_reg;
};

#define AVAIL_REGS_INITIALIZER (struct avail_regs) { { 0, 0, 0}, 0, 0, 0 }
/* Assign a register for a CPRC param with correct size and alignment.
 * size and align are in bytes, as returned by type_size */
int assign_fpreg(struct avail_regs *avregs, int align, int size)
{
  if (avregs->first_free_reg == -1)
  if (align >> 3) { // alignment needed (base type: double)
    first_reg = avregs->first_free_reg;
    avregs->avail[avregs->last_hole++] = first_reg++;
  if (size == 4 && avregs->first_hole != avregs->last_hole)
    return avregs->avail[avregs->first_hole++];
  first_reg = avregs->first_free_reg;
  if (first_reg + size / 4 <= 16) {
    avregs->first_free_reg = first_reg + size / 4;
  avregs->first_free_reg = -1;
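/* Illustrative example, not part of the original source: VFP candidates are
   packed into s0-s15 in declaration order, with doubles aligned to an even
   single-precision register; the holes this alignment creates are recorded in
   avail[] and later back-filled by single floats.  For f(float, double, float)
   the arguments therefore end up in s0, d1 (s2/s3) and s1 respectively. */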
/* Return 1 if this function returns via an sret pointer, 0 otherwise */
ST_FUNC int gfunc_sret(CType *vt, CType *ret, int *ret_align) {
  size = type_size(vt, &align);
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
  int size, align, r, args_size, i, ncrn, ncprn, argno, vfp_argno;
  signed char plan[4][2]={{-1,-1},{-1,-1},{-1,-1},{-1,-1}};
  SValue *before_stack = NULL; /* SValue before first on-stack argument */
  SValue *before_vfpreg_hfa = NULL; /* SValue before first in-VFP-reg hfa argument */
#ifdef TCC_ARM_HARDFLOAT
  struct avail_regs avregs = AVAIL_REGS_INITIALIZER;
  signed char vfp_plan[16];
  int plan2[4]={0,0,0,0};
#ifdef TCC_ARM_HARDFLOAT
  memset(vfp_plan, -1, sizeof(vfp_plan));
  memset(plan2, 0, sizeof(plan2));
  variadic = (vtop[-nb_args].type.ref->c == FUNC_ELLIPSIS);
  r = vtop->r & VT_VALMASK;
  if (r == VT_CMP || (r & ~1) == VT_JMP)
  if((vtop[-nb_args].type.ref->type.t & VT_BTYPE) == VT_STRUCT
     && type_size(&vtop[-nb_args].type.ref->type, &align) <= 4) {
    vtop[-nb_args]=vtop[-nb_args+1];
    vtop[-nb_args+1]=tmp;
    vpushi(0), nb_args++;
    vtop->type.t = VT_LLONG;
  ncrn = ncprn = argno = vfp_argno = args_size = 0;
  /* Assign arguments to registers and stack with alignment.
     If, considering alignment constraints, enough registers of the correct
     type (core or VFP) are free for the current argument, assign them to it,
     else allocate it on the stack with correct alignment. Whenever a structure
     is allocated in registers or on stack, it is always put on the stack at
     this stage. The stack is divided into 3 zones. The zones are, from low
     addresses to high addresses: structures to be loaded into core registers,
     structures to be loaded into VFP registers, arguments allocated on the
     stack. SValue's representing structures in the first zone are moved just
     after the SValue pointed to by before_vfpreg_hfa. SValue's representing
     structures in the second zone are moved just after the SValue pointed to
     by before_stack. */
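  /* Illustrative example, not part of the original source: for a call such as
     f(struct { int a, b, c, d; } x, struct { float u, v; } h, struct { int w[2]; } y)
     on a hardfloat target, the copy of x sits lowest (it will be popped into
     core registers r0-r3), the copy of the HFA h sits above it (popped into
     VFP registers) and y sits highest and simply stays on the stack; the
     before_vfpreg_hfa and before_stack markers keep the SValues in exactly
     that order while they are rotated below. */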
  for(i = nb_args; i-- ;) {
    int j, assigned_vfpreg = 0;
    size = type_size(&vtop[-i].type, &align);
    switch(vtop[-i].type.t & VT_BTYPE) {
#ifdef TCC_ARM_HARDFLOAT
      int hfa = 0; /* Homogeneous float aggregate */
      if (is_float(vtop[-i].type.t)
          || (hfa = is_float_hgen_aggr(&vtop[-i].type))) {
        assigned_vfpreg = assign_fpreg(&avregs, align, size);
        end_reg = assigned_vfpreg + (size - 1) / 4;
        if (assigned_vfpreg >= 0) {
          vfp_plan[vfp_argno++]=TREG_F0 + assigned_vfpreg/2;
          /* before_stack can only have been set because all core registers
             are assigned, so no need to care about before_vfpreg_hfa if
             before_stack is set */
            vrote(&vtop[-i], &vtop[-i] - before_stack);
        } else if (!before_vfpreg_hfa)
          before_vfpreg_hfa = &vtop[-i-1];
        for (j = assigned_vfpreg; j <= end_reg; j++)
        /* No need to update before_stack as no more hfa can be allocated
           in VFP registers */
        if (!before_vfpreg_hfa)
          before_vfpreg_hfa = &vtop[-i-1];
      ncrn = (ncrn + (align-1)/4) & -(align/4);
      size = (size + 3) & -4;
      if (ncrn + size/4 <= 4 || (ncrn < 4 && assigned_vfpreg != -1)) {
        /* Either there is an HFA in VFP registers, or there are arguments on
           the stack, it cannot be both. Hence either before_stack already
           points after the slot where the vtop[-i] SValue is moved, or
           before_stack will not be used */
        if (before_vfpreg_hfa) {
          vrote(&vtop[-i], &vtop[-i] - before_vfpreg_hfa);
        for (j = ncrn; j < 4 && j < ncrn + size / 4; j++)
        args_size = (ncrn - 4) * 4;
          before_stack = &vtop[-i-1];
        /* No need to set before_vfpreg_hfa if not set since there will no
           longer be any structure assigned to core registers */
          before_stack = &vtop[-i-1];
      int is_long = (vtop[-i].type.t & VT_BTYPE) == VT_LLONG;
        ncrn = (ncrn + 1) & -2;
      plan[argno++][0]=ncrn++;
        plan[argno-1][1]=ncrn++;
      if(args_size & (align-1)) {
        vtop->type.t = VT_VOID; /* padding */
      args_size += (size + 3) & -4;
  args_size = keep = 0;
  for(i = 0; i < nb_args; i++) {
    if ((vtop->type.t & VT_BTYPE) == VT_STRUCT) {
      size = type_size(&vtop->type, &align);
      /* align to stack align size */
      size = (size + 3) & -4;
      /* allocate the necessary size on stack */
      /* generate structure store */
      o(0xE1A0000D|(intr(r)<<12));
      vset(&vtop->type, r | VT_LVAL, 0);
    } else if (is_float(vtop->type.t)) {
#ifdef TCC_ARM_HARDFLOAT
      if (!variadic && --vfp_argno<16 && vfp_plan[vfp_argno]!=-1) {
        plan2[keep++]=vfp_plan[vfp_argno];
      r=vfpr(gv(RC_FLOAT))<<12;
      if ((vtop->type.t & VT_BTYPE) != VT_FLOAT)
        r|=0x101; /* fstms -> fstmd */
      r=fpr(gv(RC_FLOAT))<<12;
      if ((vtop->type.t & VT_BTYPE) == VT_FLOAT)
      else if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
        size = LDOUBLE_SIZE;
      o(0xED2D0100|r|(size>>2));
      /* simple type (currently always same size) */
      /* XXX: implicit cast ? */
      if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
        if(--argno<4 && plan[argno][1]!=-1)
        o(0xE52D0004|(intr(r)<<12)); /* str r,[sp,#-4]! */
        if(--argno<4 && plan[argno][0]!=-1)
        if(vtop->type.t == VT_VOID) {
          o(0xE24DD004); /* sub sp,sp,#4 */
        o(0xE52D0004|(intr(r)<<12)); /* str r,[sp,#-4]! */
  for(i = 0; i < keep; i++) {
    gv(regmask(plan2[i]));
#ifdef TCC_ARM_HARDFLOAT
    /* arg is in s(2d+1): plan2[i]<plan2[i+1] => alignment occurred (e.g. f,d,f) */
    if (i < keep - 1 && is_float(vtop->type.t) && (plan2[i] <= plan2[i + 1])) {
      o(0xEEF00A40|(vfpr(plan2[i])<<12)|vfpr(plan2[i]));
  save_regs(keep); /* save used temporary registers */
  todo&=((1<<ncrn)-1);
  args_size-=nb_regs*4;
    if(vfp_todo&(1<<i)) {
      o(0xED9D0A00|(i&1)<<22|(i>>1)<<12|nb_fregs);
      /* There might be 2 floats in a double VFP reg but that doesn't seem
         to matter */
      vtop->r=TREG_F0+i/2;
  gadd_sp(nb_fregs*4);
  args_size-=nb_fregs*4;
  if((vtop->type.ref->type.t & VT_BTYPE) == VT_STRUCT
     && type_size(&vtop->type.ref->type, &align) <= 4)
    store(REG_IRET,vtop-keep);
#ifdef TCC_ARM_HARDFLOAT
  else if(variadic && is_float(vtop->type.ref->type.t)) {
  else if(is_float(vtop->type.ref->type.t)) {
    if((vtop->type.ref->type.t & VT_BTYPE) == VT_FLOAT) {
      o(0xEE000A10); /* fmsr s0,r0 */
      o(0xEE000B10); /* fmdlr d0,r0 */
      o(0xEE201B10); /* fmdhr d0,r1 */
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
  int n, nf, size, align, variadic, struct_ret = 0;
#ifdef TCC_ARM_HARDFLOAT
  struct avail_regs avregs = AVAIL_REGS_INITIALIZER;
  sym = func_type->ref;
  func_vt = sym->type;
  variadic = (func_type->ref->c == FUNC_ELLIPSIS);
  if((func_vt.t & VT_BTYPE) == VT_STRUCT
     && type_size(&func_vt,&align) > 4)
    func_vc = 12; /* Offset from fp of the place to store the result */
  for(sym2=sym->next;sym2 && (n<4 || nf<16);sym2=sym2->next) {
    size = type_size(&sym2->type, &align);
#ifdef TCC_ARM_HARDFLOAT
    if (!variadic && (is_float(sym2->type.t)
        || is_float_hgen_aggr(&sym2->type))) {
      int tmpnf = assign_fpreg(&avregs, align, size) + 1;
      nf = (tmpnf > nf) ? tmpnf : nf;
    n += (size + 3) / 4;
  o(0xE1A0C00D); /* mov ip,sp */
  o(0xE92D0000|((1<<n)-1)); /* save r0-r3 on stack if needed */
  nf=(nf+1)&-2; /* nf => HARDFLOAT => EABI */
  o(0xED2D0A00|nf); /* save s0-s15 on stack if needed */
  o(0xE92D5800); /* save fp, ip, lr */
  o(0xE1A0B00D); /* mov fp, sp */
  func_sub_sp_offset = ind;
  o(0xE1A00000); /* nop, leave space for stack adjustment in epilogue */
  int addr, pn = struct_ret, sn = 0; /* pn=core, sn=stack */
#ifdef TCC_ARM_HARDFLOAT
  avregs = AVAIL_REGS_INITIALIZER;
  while ((sym = sym->next)) {
    size = type_size(type, &align);
    size = (size + 3) >> 2;
    align = (align + 3) & ~3;
#ifdef TCC_ARM_HARDFLOAT
    if (!variadic && (is_float(sym->type.t)
        || is_float_hgen_aggr(&sym->type))) {
      int fpn = assign_fpreg(&avregs, align, size << 2);
      pn = (pn + (align-1)/4) & -(align/4);
      addr = (nf + pn) * 4;
#ifdef TCC_ARM_HARDFLOAT
      sn = (sn + (align-1)/4) & -(align/4);
      addr = (n + nf + sn) * 4;
    sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | lvalue_type(type->t), addr+12);
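    /* Illustrative note, not part of the original source: the prolog stored
       the incoming core registers, then the VFP registers, then {fp, ip, lr},
       and finally set fp to sp, so fp points at the saved fp and the three
       saved words occupy fp+0..fp+11.  The register save area for the
       incoming arguments therefore starts at fp+12, which is why 12 is added
       to the computed addr here (compare the func_vc = 12 offset used above
       for struct returns). */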
/* generate function epilog */
void gfunc_epilog(void)
{
  /* Useless but harmless copy of the float result into main register(s) in
     case of variadic function in the hardfloat variant */
  if(is_float(func_vt.t)) {
    if((func_vt.t & VT_BTYPE) == VT_FLOAT)
      o(0xEE100A10); /* fmrs r0, s0 */
    o(0xEE100B10); /* fmrdl r0, d0 */
    o(0xEE301B10); /* fmrdh r1, d0 */
  o(0xE89BA800); /* restore fp, sp, pc */
  diff = (-loc + 3) & -4;
  diff = ((diff + 11) & -8) - 4;
  x=stuff_const(0xE24BD000, diff); /* sub sp,fp,# */
  *(uint32_t *)(cur_text_section->data + func_sub_sp_offset) = x;
  o(0xE59FC004); /* ldr ip,[pc+4] */
  o(0xE04BD00C); /* sub sp,fp,ip */
  o(0xE1A0F00E); /* mov pc,lr */
  *(uint32_t *)(cur_text_section->data + func_sub_sp_offset) =
    0xE1000000|encbranch(func_sub_sp_offset,addr,1);
/* generate a jump to a label */
  o(0xE0000000|encbranch(r,t,1));

/* generate a jump to a fixed address */
void gjmp_addr(int a)
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
  v = vtop->r & VT_VALMASK;
    op=mapcc(inv?negcc(vtop->c.i):vtop->c.i);
    op|=encbranch(r,t,1);
  } else if (v == VT_JMP || v == VT_JMPI) {
    if ((v & 1) == inv) {
      p = decbranch(lp=p);
      x = (uint32_t *)(cur_text_section->data + lp);
      *x |= encbranch(lp,t,1);
    if (is_float(vtop->type.t)) {
      o(0xEEB50A40|(vfpr(r)<<12)|T2CPR(vtop->type.t)); /* fcmpzX */
      o(0xEEF1FA10); /* fmstat */
      o(0xEE90F118|(fpr(r)<<16));
      return gtst(inv, t);
    } else if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
      /* constant jmp optimization */
      if ((vtop->c.i != 0) != inv)
      o(0xE3300000|(intr(v)<<16));
      return gtst(inv, t);
/* generate an integer binary operation */
void gen_opi(int op)
{
  uint32_t opc = 0, r, fr;
  unsigned short retreg = REG_IRET;

  case TOK_ADDC1: /* add with carry generation */
  case TOK_SUBC1: /* sub with carry generation */
  case TOK_ADDC2: /* add with carry use */
  case TOK_SUBC2: /* sub with carry use */
    gv2(RC_INT, RC_INT);
    o(0xE0000090|(intr(r)<<16)|(intr(r)<<8)|intr(fr));
    func=TOK___aeabi_idivmod;
    func=TOK___aeabi_uidivmod;
    gv2(RC_INT, RC_INT);
    r=intr(vtop[-1].r2=get_reg(RC_INT));
    vtop[-1].r=get_reg_ex(RC_INT,regmask(c));
    o(0xE0800090|(r<<16)|(intr(vtop->r)<<12)|(intr(c)<<8)|intr(vtop[1].r));
    if((vtop[-1].r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
      if(opc == 4 || opc == 5 || opc == 0xc) {
        opc|=2; // sub -> rsb
    if ((vtop->r & VT_VALMASK) == VT_CMP ||
        (vtop->r & (VT_VALMASK & ~1)) == VT_JMP)
    opc=0xE0000000|(opc<<20)|(c<<16);
    if((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
      x=stuff_const(opc|0x2000000,vtop->c.i);
      r=intr(vtop[-1].r=get_reg_ex(RC_INT,regmask(vtop[-1].r)));
    fr=intr(gv(RC_INT));
    r=intr(vtop[-1].r=get_reg_ex(RC_INT,two2mask(vtop->r,vtop[-1].r)));
    if (op >= TOK_ULT && op <= TOK_GT) {
    opc=0xE1A00000|(opc<<5);
    if ((vtop->r & VT_VALMASK) == VT_CMP ||
        (vtop->r & (VT_VALMASK & ~1)) == VT_JMP)
    if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
      fr=intr(vtop[-1].r=get_reg_ex(RC_INT,regmask(vtop[-1].r)));
      c = vtop->c.i & 0x1f;
      o(opc|(c<<7)|(fr<<12));
    fr=intr(gv(RC_INT));
    c=intr(vtop[-1].r=get_reg_ex(RC_INT,two2mask(vtop->r,vtop[-1].r)));
    o(opc|(c<<12)|(fr<<8)|0x10);
    vpush_global_sym(&func_old_type, func);
    tcc_error("gen_opi %i unimplemented!",op);
static int is_zero(int i)
{
  if((vtop[i].r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
  if (vtop[i].type.t == VT_FLOAT)
    return (vtop[i].c.f == 0.f);
  else if (vtop[i].type.t == VT_DOUBLE)
    return (vtop[i].c.d == 0.0);
  return (vtop[i].c.ld == 0.l);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
 * two operands are guaranteed to have the same floating point type */
void gen_opf(int op)
{
  x=0xEE000A00|T2CPR(vtop->type.t);
    x|=0x810000; /* fsubX -> fnegX */
  if(op < TOK_ULT || op > TOK_GT) {
    tcc_error("unknown fp op %x!",op);
  case TOK_LT: op=TOK_GT; break;
  case TOK_GE: op=TOK_ULE; break;
  case TOK_LE: op=TOK_GE; break;
  case TOK_GT: op=TOK_ULT; break;
  x|=0xB40040; /* fcmpX */
  if(op!=TOK_EQ && op!=TOK_NE)
    x|=0x80; /* fcmpX -> fcmpeX */
    o(x|0x10000|(vfpr(gv(RC_FLOAT))<<12)); /* fcmp(e)X -> fcmp(e)zX */
    x|=vfpr(gv(RC_FLOAT));
    o(x|(vfpr(gv(RC_FLOAT))<<12));
  o(0xEEF1FA10); /* fmstat */
  case TOK_LE: op=TOK_ULE; break;
  case TOK_LT: op=TOK_ULT; break;
  case TOK_UGE: op=TOK_GE; break;
  case TOK_UGT: op=TOK_GT; break;
  vtop->r=get_reg_ex(RC_FLOAT,r);
  o(x|(vfpr(vtop->r)<<12));
static uint32_t is_fconst()
{
  if((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
  if (vtop->type.t == VT_FLOAT)
  else if (vtop->type.t == VT_DOUBLE)
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
void gen_opf(int op)
{
  uint32_t x, r, r2, c1, c2;
  //fputs("gen_opf\n",stderr);
#if LDOUBLE_SIZE == 8
  if ((vtop->type.t & VT_BTYPE) != VT_FLOAT)
  if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
  else if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE)
    r=fpr(gv(RC_FLOAT));
    r2=fpr(gv(RC_FLOAT));
      r=fpr(gv(RC_FLOAT));
    } else if(c1 && c1<=0xf) {
      r=fpr(gv(RC_FLOAT));
      r=fpr(gv(RC_FLOAT));
      r2=fpr(gv(RC_FLOAT));
      r=fpr(gv(RC_FLOAT));
      r2=fpr(gv(RC_FLOAT));
      r=fpr(gv(RC_FLOAT));
    } else if(c1 && c1<=0xf) {
      r=fpr(gv(RC_FLOAT));
      r=fpr(gv(RC_FLOAT));
      r2=fpr(gv(RC_FLOAT));
    if(op >= TOK_ULT && op <= TOK_GT) {
      x|=0xd0f110; // cmfe
      /* bug (intention?) in Linux FPU emulator
         doesn't set carry if equal */
      tcc_error("unsigned comparison on floats?");
      op=TOK_ULE; /* correct in unordered case only if AC bit in FPSR set */
      x&=~0x400000; // cmfe -> cmf
      r=fpr(gv(RC_FLOAT));
      r2=fpr(gv(RC_FLOAT));
      vtop[-1].r = VT_CMP;
    tcc_error("unknown fp op %x!",op);
  if(vtop[-1].r == VT_CMP)
    vtop[-1].r=get_reg_ex(RC_FLOAT,two2mask(vtop[-1].r,c1));
  o(x|(r<<16)|(c1<<12)|r2);
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
ST_FUNC void gen_cvt_itof1(int t)
{
  bt=vtop->type.t & VT_BTYPE;
  if(bt == VT_INT || bt == VT_SHORT || bt == VT_BYTE) {
    r2=vfpr(vtop->r=get_reg(RC_FLOAT));
    o(0xEE000A10|(r<<12)|(r2<<16)); /* fmsr */
    if(!(vtop->type.t & VT_UNSIGNED))
      r2|=0x80; /* fuitoX -> fsituX */
    o(0xEEB80A40|r2|T2CPR(t)); /* fYitoX */
    r2=fpr(vtop->r=get_reg(RC_FLOAT));
    if((t & VT_BTYPE) != VT_FLOAT)
      dsize=0x80; /* flts -> fltd */
    o(0xEE000110|dsize|(r2<<16)|(r<<12)); /* flts */
    if((vtop->type.t & (VT_UNSIGNED|VT_BTYPE)) == (VT_UNSIGNED|VT_INT)) {
      o(0xE3500000|(r<<12)); /* cmp */
      r=fpr(get_reg(RC_FLOAT));
      if(last_itod_magic) {
        off=ind+8-last_itod_magic;
      o(0xBD1F0100|(r<<12)|off); /* ldflts */
      o(0xEA000000); /* b */
      last_itod_magic=ind;
      o(0x4F800000); /* 4294967296.0f */
      o(0xBE000100|dsize|(r2<<16)|(r2<<12)|r); /* adflt */
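      /* Illustrative note, not part of the original source: the FPA flts
         instruction only converts signed values, so an unsigned int with the
         sign bit set first becomes a negative float and is then corrected by
         adding 2^32; 0x4F800000 is exactly 4294967296.0f, kept out of line as
         the "magic" literal that the ldflts above loads. */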
  } else if(bt == VT_LLONG) {
    CType *func_type = 0;
    if((t & VT_BTYPE) == VT_FLOAT) {
      func_type = &func_float_type;
      if(vtop->type.t & VT_UNSIGNED)
        func=TOK___floatundisf;
        func=TOK___floatdisf;
#if LDOUBLE_SIZE != 8
    } else if((t & VT_BTYPE) == VT_LDOUBLE) {
      func_type = &func_ldouble_type;
      if(vtop->type.t & VT_UNSIGNED)
        func=TOK___floatundixf;
        func=TOK___floatdixf;
    } else if((t & VT_BTYPE) == VT_DOUBLE) {
    } else if((t & VT_BTYPE) == VT_DOUBLE || (t & VT_BTYPE) == VT_LDOUBLE) {
      func_type = &func_double_type;
      if(vtop->type.t & VT_UNSIGNED)
        func=TOK___floatundidf;
        func=TOK___floatdidf;
    vpush_global_sym(func_type, func);
  tcc_error("unimplemented gen_cvt_itof %x!",vtop->type.t);
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
  r2=vtop->type.t & VT_BTYPE;
    r=vfpr(gv(RC_FLOAT));
    o(0xEEBC0AC0|(r<<12)|r|T2CPR(r2)|u); /* ftoXizY */
    r2=intr(vtop->r=get_reg(RC_INT));
    o(0xEE100A10|(r<<16)|(r2<<12));
      func=TOK___fixunssfsi;
#if LDOUBLE_SIZE != 8
    else if(r2 == VT_LDOUBLE)
      func=TOK___fixunsxfsi;
    else if(r2 == VT_DOUBLE)
    else if(r2 == VT_LDOUBLE || r2 == VT_DOUBLE)
      func=TOK___fixunsdfsi;
    r=fpr(gv(RC_FLOAT));
    r2=intr(vtop->r=get_reg(RC_INT));
    o(0xEE100170|(r2<<12)|r);
  } else if(t == VT_LLONG) { // unsigned handled in gen_cvt_ftoi1
#if LDOUBLE_SIZE != 8
    else if(r2 == VT_LDOUBLE)
    else if(r2 == VT_DOUBLE)
    else if(r2 == VT_LDOUBLE || r2 == VT_DOUBLE)
    vpush_global_sym(&func_old_type, func);
    vtop->r2 = REG_LRET;
  tcc_error("unimplemented gen_cvt_ftoi!");
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
  if(((vtop->type.t & VT_BTYPE) == VT_FLOAT) != ((t & VT_BTYPE) == VT_FLOAT)) {
    uint32_t r = vfpr(gv(RC_FLOAT));
    o(0xEEB70AC0|(r<<12)|r|T2CPR(vtop->type.t));
  /* all we have to do on i386 and FPA ARM is to put the float in a register */
/* computed goto support */

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
  tcc_error("variable length arrays unsupported for this target");
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
  tcc_error("variable length arrays unsupported for this target");
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
  tcc_error("variable length arrays unsupported for this target");
}

/* end of ARM code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/*************************************************************/