2 * ARMv4 code generator for TCC
4 * Copyright (c) 2003 Daniel Glöckner
5 * Copyright (c) 2012 Thomas Preud'homme
7 * Based on i386-gen.c by Fabrice Bellard
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #ifdef TARGET_DEFS_ONLY
26 #if defined(TCC_ARM_EABI) && !defined(TCC_ARM_VFP)
27 #error "Currently TinyCC only supports float computation with VFP instructions"
30 /* number of available registers */
37 #ifndef TCC_ARM_VERSION
38 # define TCC_ARM_VERSION 5
41 /* a register can belong to several classes. The classes must be
42 sorted from more general to more precise (see gv2() code which does
43 assumptions on it). */
44 #define RC_INT 0x0001 /* generic integer register */
45 #define RC_FLOAT 0x0002 /* generic float register */
61 #define RC_IRET RC_R0 /* function return: integer register */
62 #define RC_LRET RC_R1 /* function return: second integer register */
63 #define RC_FRET RC_F0 /* function return: float register */
65 /* pretty names for the registers */
85 #define T2CPR(t) (((t) & VT_BTYPE) != VT_FLOAT ? 0x100 : 0)
88 /* return registers for function */
89 #define REG_IRET TREG_R0 /* single word int return register */
90 #define REG_LRET TREG_R1 /* second word return register (for long long) */
91 #define REG_FRET TREG_F0 /* float return register */
94 #define TOK___divdi3 TOK___aeabi_ldivmod
95 #define TOK___moddi3 TOK___aeabi_ldivmod
96 #define TOK___udivdi3 TOK___aeabi_uldivmod
97 #define TOK___umoddi3 TOK___aeabi_uldivmod
100 /* defined if function parameters must be evaluated in reverse order */
101 #define INVERT_FUNC_PARAMS
103 /* defined if structures are passed as pointers. Otherwise structures
104 are directly pushed on stack. */
105 /* #define FUNC_STRUCT_PARAM_AS_PTR */
107 /* pointer size, in bytes */
110 /* long double size and alignment, in bytes */
112 #define LDOUBLE_SIZE 8
116 #define LDOUBLE_SIZE 8
120 #define LDOUBLE_ALIGN 8
122 #define LDOUBLE_ALIGN 4
125 /* maximum alignment (for aligned attribute support) */
128 #define CHAR_IS_UNSIGNED
130 /******************************************************/
133 #define EM_TCC_TARGET EM_ARM
135 /* relocation type for 32 bit data relocation */
136 #define R_DATA_32 R_ARM_ABS32
137 #define R_DATA_PTR R_ARM_ABS32
138 #define R_JMP_SLOT R_ARM_JUMP_SLOT
139 #define R_COPY R_ARM_COPY
141 #define ELF_START_ADDR 0x00008000
142 #define ELF_PAGE_SIZE 0x1000
149 enum float_abi float_abi
;
151 /******************************************************/
152 #else /* ! TARGET_DEFS_ONLY */
153 /******************************************************/
156 ST_DATA
const int reg_classes
[NB_REGS
] = {
157 /* r0 */ RC_INT
| RC_R0
,
158 /* r1 */ RC_INT
| RC_R1
,
159 /* r2 */ RC_INT
| RC_R2
,
160 /* r3 */ RC_INT
| RC_R3
,
161 /* r12 */ RC_INT
| RC_R12
,
162 /* f0 */ RC_FLOAT
| RC_F0
,
163 /* f1 */ RC_FLOAT
| RC_F1
,
164 /* f2 */ RC_FLOAT
| RC_F2
,
165 /* f3 */ RC_FLOAT
| RC_F3
,
167 /* d4/s8 */ RC_FLOAT
| RC_F4
,
168 /* d5/s10 */ RC_FLOAT
| RC_F5
,
169 /* d6/s12 */ RC_FLOAT
| RC_F6
,
170 /* d7/s14 */ RC_FLOAT
| RC_F7
,
174 static int func_sub_sp_offset
, last_itod_magic
;
177 #if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
178 static CType float_type
, double_type
, func_float_type
, func_double_type
;
179 ST_FUNC
void arm_init(struct TCCState
*s
)
181 float_type
.t
= VT_FLOAT
;
182 double_type
.t
= VT_DOUBLE
;
183 func_float_type
.t
= VT_FUNC
;
184 func_float_type
.ref
= sym_push(SYM_FIELD
, &float_type
, FUNC_CDECL
, FUNC_OLD
);
185 func_double_type
.t
= VT_FUNC
;
186 func_double_type
.ref
= sym_push(SYM_FIELD
, &double_type
, FUNC_CDECL
, FUNC_OLD
);
188 float_abi
= s
->float_abi
;
191 #define func_float_type func_old_type
192 #define func_double_type func_old_type
193 #define func_ldouble_type func_old_type
194 ST_FUNC
void arm_init(void) {}
197 static int two2mask(int a
,int b
) {
198 return (reg_classes
[a
]|reg_classes
[b
])&~(RC_INT
|RC_FLOAT
);
201 static int regmask(int r
) {
202 return reg_classes
[r
]&~(RC_INT
|RC_FLOAT
);
205 /******************************************************/
208 char *default_elfinterp(struct TCCState
*s
)
210 if (s
->float_abi
== ARM_HARD_FLOAT
)
211 return "/lib/ld-linux-armhf.so.3";
213 return "/lib/ld-linux.so.3";
219 /* this is a good place to start adding big-endian support*/
223 if (!cur_text_section
)
224 tcc_error("compiler error! This happens f.ex. if the compiler\n"
225 "can't evaluate constant expressions outside of a function.");
226 if (ind1
> cur_text_section
->data_allocated
)
227 section_realloc(cur_text_section
, ind1
);
228 cur_text_section
->data
[ind
++] = i
&255;
230 cur_text_section
->data
[ind
++] = i
&255;
232 cur_text_section
->data
[ind
++] = i
&255;
234 cur_text_section
->data
[ind
++] = i
;
237 static uint32_t stuff_const(uint32_t op
, uint32_t c
)
240 uint32_t nc
= 0, negop
= 0;
250 case 0x1A00000: //mov
251 case 0x1E00000: //mvn
258 return (op
&0xF010F000)|((op
>>16)&0xF)|0x1E00000;
262 return (op
&0xF010F000)|((op
>>16)&0xF)|0x1A00000;
263 case 0x1C00000: //bic
268 case 0x1800000: //orr
270 return (op
&0xFFF0FFFF)|0x1E00000;
276 if(c
<256) /* catch undefined <<32 */
279 m
=(0xff>>i
)|(0xff<<(32-i
));
281 return op
|(i
<<7)|(c
<<i
)|(c
>>(32-i
));
291 void stuff_const_harder(uint32_t op
, uint32_t v
) {
297 uint32_t a
[16], nv
, no
, o2
, n2
;
300 o2
=(op
&0xfff0ffff)|((op
&0xf000)<<4);;
302 a
[i
]=(a
[i
-1]>>2)|(a
[i
-1]<<30);
304 for(j
=i
<4?i
+12:15;j
>=i
+4;j
--)
305 if((v
&(a
[i
]|a
[j
]))==v
) {
306 o(stuff_const(op
,v
&a
[i
]));
307 o(stuff_const(o2
,v
&a
[j
]));
314 for(j
=i
<4?i
+12:15;j
>=i
+4;j
--)
315 if((nv
&(a
[i
]|a
[j
]))==nv
) {
316 o(stuff_const(no
,nv
&a
[i
]));
317 o(stuff_const(n2
,nv
&a
[j
]));
322 for(k
=i
<4?i
+12:15;k
>=j
+4;k
--)
323 if((v
&(a
[i
]|a
[j
]|a
[k
]))==v
) {
324 o(stuff_const(op
,v
&a
[i
]));
325 o(stuff_const(o2
,v
&a
[j
]));
326 o(stuff_const(o2
,v
&a
[k
]));
333 for(k
=i
<4?i
+12:15;k
>=j
+4;k
--)
334 if((nv
&(a
[i
]|a
[j
]|a
[k
]))==nv
) {
335 o(stuff_const(no
,nv
&a
[i
]));
336 o(stuff_const(n2
,nv
&a
[j
]));
337 o(stuff_const(n2
,nv
&a
[k
]));
340 o(stuff_const(op
,v
&a
[0]));
341 o(stuff_const(o2
,v
&a
[4]));
342 o(stuff_const(o2
,v
&a
[8]));
343 o(stuff_const(o2
,v
&a
[12]));
347 ST_FUNC
uint32_t encbranch(int pos
, int addr
, int fail
)
351 if(addr
>=0x1000000 || addr
<-0x1000000) {
353 tcc_error("FIXME: function bigger than 32MB");
356 return 0x0A000000|(addr
&0xffffff);
359 int decbranch(int pos
)
362 x
=*(uint32_t *)(cur_text_section
->data
+ pos
);
369 /* output a symbol and patch all calls to it */
370 void gsym_addr(int t
, int a
)
375 x
=(uint32_t *)(cur_text_section
->data
+ t
);
378 *x
=0xE1A00000; // nop
381 *x
|= encbranch(lt
,a
,1);
392 static uint32_t vfpr(int r
)
394 if(r
<TREG_F0
|| r
>TREG_F7
)
395 tcc_error("compiler error! register %i is no vfp register",r
);
399 static uint32_t fpr(int r
)
401 if(r
<TREG_F0
|| r
>TREG_F3
)
402 tcc_error("compiler error! register %i is no fpa register",r
);
/* Map virtual register number 'r' to its ARM core register number:
   virtual regs 0-3 are r0-r3, virtual reg 4 is r12 (ip), and 14 is lr.
   Aborts with a compiler error for anything else. */
static uint32_t intr(int r)
{
    if (r == 4)
        return 12;
    if ((r < 0 || r > 4) && r != 14)
        tcc_error("compiler error! register %i is no int register", r);
    return r;
}
416 static void calcaddr(uint32_t *base
, int *off
, int *sgn
, int maxoff
, unsigned shift
)
418 if(*off
>maxoff
|| *off
&((1<<shift
)-1)) {
425 y
=stuff_const(x
,*off
&~maxoff
);
431 y
=stuff_const(x
,(*off
+maxoff
)&~maxoff
);
435 *off
=((*off
+maxoff
)&~maxoff
)-*off
;
438 stuff_const_harder(x
,*off
&~maxoff
);
443 static uint32_t mapcc(int cc
)
448 return 0x30000000; /* CC/LO */
450 return 0x20000000; /* CS/HS */
452 return 0x00000000; /* EQ */
454 return 0x10000000; /* NE */
456 return 0x90000000; /* LS */
458 return 0x80000000; /* HI */
460 return 0x40000000; /* MI */
462 return 0x50000000; /* PL */
464 return 0xB0000000; /* LT */
466 return 0xA0000000; /* GE */
468 return 0xD0000000; /* LE */
470 return 0xC0000000; /* GT */
472 tcc_error("unexpected condition code");
473 return 0xE0000000; /* AL */
476 static int negcc(int cc
)
505 tcc_error("unexpected condition code");
509 /* load 'r' from value 'sv' */
510 void load(int r
, SValue
*sv
)
512 int v
, ft
, fc
, fr
, sign
;
529 uint32_t base
= 0xB; // fp
532 v1
.r
= VT_LOCAL
| VT_LVAL
;
534 load(base
=14 /* lr */, &v1
);
537 } else if(v
== VT_CONST
) {
545 } else if(v
< VT_CONST
) {
552 calcaddr(&base
,&fc
,&sign
,1020,2);
554 op
=0xED100A00; /* flds */
557 if ((ft
& VT_BTYPE
) != VT_FLOAT
)
558 op
|=0x100; /* flds -> fldd */
559 o(op
|(vfpr(r
)<<12)|(fc
>>2)|(base
<<16));
564 #if LDOUBLE_SIZE == 8
565 if ((ft
& VT_BTYPE
) != VT_FLOAT
)
568 if ((ft
& VT_BTYPE
) == VT_DOUBLE
)
570 else if ((ft
& VT_BTYPE
) == VT_LDOUBLE
)
573 o(op
|(fpr(r
)<<12)|(fc
>>2)|(base
<<16));
575 } else if((ft
& (VT_BTYPE
|VT_UNSIGNED
)) == VT_BYTE
576 || (ft
& VT_BTYPE
) == VT_SHORT
) {
577 calcaddr(&base
,&fc
,&sign
,255,0);
579 if ((ft
& VT_BTYPE
) == VT_SHORT
)
581 if ((ft
& VT_UNSIGNED
) == 0)
585 o(op
|(intr(r
)<<12)|(base
<<16)|((fc
&0xf0)<<4)|(fc
&0xf));
587 calcaddr(&base
,&fc
,&sign
,4095,0);
591 if ((ft
& VT_BTYPE
) == VT_BYTE
|| (ft
& VT_BTYPE
) == VT_BOOL
)
593 o(op
|(intr(r
)<<12)|fc
|(base
<<16));
599 op
=stuff_const(0xE3A00000|(intr(r
)<<12),sv
->c
.ul
);
600 if (fr
& VT_SYM
|| !op
) {
601 o(0xE59F0000|(intr(r
)<<12));
604 greloc(cur_text_section
, sv
->sym
, ind
, R_ARM_ABS32
);
609 } else if (v
== VT_LOCAL
) {
610 op
=stuff_const(0xE28B0000|(intr(r
)<<12),sv
->c
.ul
);
611 if (fr
& VT_SYM
|| !op
) {
612 o(0xE59F0000|(intr(r
)<<12));
614 if(fr
& VT_SYM
) // needed ?
615 greloc(cur_text_section
, sv
->sym
, ind
, R_ARM_ABS32
);
617 o(0xE08B0000|(intr(r
)<<12)|intr(r
));
621 } else if(v
== VT_CMP
) {
622 o(mapcc(sv
->c
.ul
)|0x3A00001|(intr(r
)<<12));
623 o(mapcc(negcc(sv
->c
.ul
))|0x3A00000|(intr(r
)<<12));
625 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
628 o(0xE3A00000|(intr(r
)<<12)|t
);
631 o(0xE3A00000|(intr(r
)<<12)|(t
^1));
633 } else if (v
< VT_CONST
) {
636 o(0xEEB00A40|(vfpr(r
)<<12)|vfpr(v
)|T2CPR(ft
)); /* fcpyX */
638 o(0xEE008180|(fpr(r
)<<12)|fpr(v
));
641 o(0xE1A00000|(intr(r
)<<12)|intr(v
));
645 tcc_error("load unimplemented!");
648 /* store register 'r' in lvalue 'v' */
649 void store(int r
, SValue
*sv
)
652 int v
, ft
, fc
, fr
, sign
;
667 if (fr
& VT_LVAL
|| fr
== VT_LOCAL
) {
673 } else if(v
== VT_CONST
) {
684 calcaddr(&base
,&fc
,&sign
,1020,2);
686 op
=0xED000A00; /* fsts */
689 if ((ft
& VT_BTYPE
) != VT_FLOAT
)
690 op
|=0x100; /* fsts -> fstd */
691 o(op
|(vfpr(r
)<<12)|(fc
>>2)|(base
<<16));
696 #if LDOUBLE_SIZE == 8
697 if ((ft
& VT_BTYPE
) != VT_FLOAT
)
700 if ((ft
& VT_BTYPE
) == VT_DOUBLE
)
702 if ((ft
& VT_BTYPE
) == VT_LDOUBLE
)
705 o(op
|(fpr(r
)<<12)|(fc
>>2)|(base
<<16));
708 } else if((ft
& VT_BTYPE
) == VT_SHORT
) {
709 calcaddr(&base
,&fc
,&sign
,255,0);
713 o(op
|(intr(r
)<<12)|(base
<<16)|((fc
&0xf0)<<4)|(fc
&0xf));
715 calcaddr(&base
,&fc
,&sign
,4095,0);
719 if ((ft
& VT_BTYPE
) == VT_BYTE
|| (ft
& VT_BTYPE
) == VT_BOOL
)
721 o(op
|(intr(r
)<<12)|fc
|(base
<<16));
726 tcc_error("store unimplemented");
/* Emit code adding the constant 'val' to sp (used after a call to pop the
   stack-passed arguments). */
static void gadd_sp(int val)
{
    stuff_const_harder(0xE28DD000, val); /* add sp, sp, #val */
}
734 /* 'is_jmp' is '1' if it is a jump */
735 static void gcall_or_jmp(int is_jmp
)
738 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
741 x
=encbranch(ind
,ind
+vtop
->c
.ul
,0);
743 if (vtop
->r
& VT_SYM
) {
744 /* relocation case */
745 greloc(cur_text_section
, vtop
->sym
, ind
, R_ARM_PC24
);
747 put_elf_reloc(symtab_section
, cur_text_section
, ind
, R_ARM_PC24
, 0);
748 o(x
|(is_jmp
?0xE0000000:0xE1000000));
751 o(0xE28FE004); // add lr,pc,#4
752 o(0xE51FF004); // ldr pc,[pc,#-4]
753 if (vtop
->r
& VT_SYM
)
754 greloc(cur_text_section
, vtop
->sym
, ind
, R_ARM_ABS32
);
758 /* otherwise, indirect call */
761 o(0xE1A0E00F); // mov lr,pc
762 o(0xE1A0F000|intr(r
)); // mov pc,r
766 /* Return whether a structure is an homogeneous float aggregate or not.
767 The answer is true if all the elements of the structure are of the same
768 primitive float type and there is less than 4 elements.
770 type: the type corresponding to the structure to be tested */
771 static int is_hgen_float_aggr(CType
*type
)
773 if ((type
->t
& VT_BTYPE
) == VT_STRUCT
) {
775 int btype
, nb_fields
= 0;
777 ref
= type
->ref
->next
;
778 btype
= ref
->type
.t
& VT_BTYPE
;
779 if (btype
== VT_FLOAT
|| btype
== VT_DOUBLE
) {
780 for(; ref
&& btype
== (ref
->type
.t
& VT_BTYPE
); ref
= ref
->next
, nb_fields
++);
781 return !ref
&& nb_fields
<= 4;
788 signed char avail
[3]; /* 3 holes max with only float and double alignments */
789 int first_hole
; /* first available hole */
790 int last_hole
; /* last available hole (none if equal to first_hole) */
791 int first_free_reg
; /* next free register in the sequence, hole excluded */
794 #define AVAIL_REGS_INITIALIZER (struct avail_regs) { { 0, 0, 0}, 0, 0, 0 }
796 /* Find suitable registers for a VFP Co-Processor Register Candidate (VFP CPRC
797 param) according to the rules described in the procedure call standard for
798 the ARM architecture (AAPCS). If found, the registers are assigned to this
799 VFP CPRC parameter. Registers are allocated in sequence unless a hole exists
800 and the parameter is a single float.
802 avregs: opaque structure to keep track of available VFP co-processor regs
803 align: alignment constraints for the param, as returned by type_size()
804 size: size of the parameter, as returned by type_size() */
805 int assign_vfpreg(struct avail_regs
*avregs
, int align
, int size
)
809 if (avregs
->first_free_reg
== -1)
811 if (align
>> 3) { /* double alignment */
812 first_reg
= avregs
->first_free_reg
;
813 /* alignment constraint not respected so use next reg and record hole */
815 avregs
->avail
[avregs
->last_hole
++] = first_reg
++;
816 } else { /* no special alignment (float or array of float) */
817 /* if single float and a hole is available, assign the param to it */
818 if (size
== 4 && avregs
->first_hole
!= avregs
->last_hole
)
819 return avregs
->avail
[avregs
->first_hole
++];
821 first_reg
= avregs
->first_free_reg
;
823 if (first_reg
+ size
/ 4 <= 16) {
824 avregs
->first_free_reg
= first_reg
+ size
/ 4;
827 avregs
->first_free_reg
= -1;
831 /* Returns whether all params need to be passed in core registers or not.
832 This is the case for function part of the runtime ABI. */
833 int floats_in_core_regs(SValue
*sval
)
838 switch (sval
->sym
->v
) {
839 case TOK___floatundisf
:
840 case TOK___floatundidf
:
841 case TOK___fixunssfdi
:
842 case TOK___fixunsdfdi
:
844 case TOK___fixunsxfdi
:
846 case TOK___floatdisf
:
847 case TOK___floatdidf
:
857 /* Return the number of registers needed to return the struct, or 0 if
858 returning via struct pointer. */
859 ST_FUNC
int gfunc_sret(CType
*vt
, int variadic
, CType
*ret
, int *ret_align
) {
862 size
= type_size(vt
, &align
);
863 if (float_abi
== ARM_HARD_FLOAT
&& !variadic
&&
864 (is_float(vt
->t
) || is_hgen_float_aggr(vt
))) {
868 return (size
+ 7) >> 3;
869 } else if (size
<= 4) {
881 /* Parameters are classified according to how they are copied to their final
882 destination for the function call. Because the copying is performed class
883 after class according to the order in the union below, it is important that
884 some constraints about the order of the members of this union are respected:
885 - CORE_STRUCT_CLASS must come after STACK_CLASS;
886 - CORE_CLASS must come after STACK_CLASS, CORE_STRUCT_CLASS and
888 - VFP_STRUCT_CLASS must come after VFP_CLASS.
889 See the comment for the main loop in copy_params() for the reason. */
900 int start
; /* first reg or addr used depending on the class */
901 int end
; /* last reg used or next free addr depending on the class */
902 SValue
*sval
; /* pointer to SValue on the value stack */
903 struct param_plan
*prev
; /* previous element in this class */
907 struct param_plan
*pplans
; /* array of all the param plans */
908 struct param_plan
*clsplans
[NB_CLASSES
]; /* per class lists of param plans */
911 #define add_param_plan(plan,pplan,class) \
913 pplan.prev = plan->clsplans[class]; \
914 plan->pplans[plan ## _nb] = pplan; \
915 plan->clsplans[class] = &plan->pplans[plan ## _nb++]; \
918 /* Assign parameters to registers and stack with alignment according to the
919 rules in the procedure call standard for the ARM architecture (AAPCS).
920 The overall assignment is recorded in an array of per parameter structures
921 called parameter plans. The parameter plans are also further organized in a
922 number of linked lists, one per class of parameter (see the comment for the
923 definition of union reg_class).
925 nb_args: number of parameters of the function for which a call is generated
926 corefloat: whether to pass float via core registers or not
927 plan: the structure where the overall assignment is recorded
928 todo: a bitmap that record which core registers hold a parameter
930 Returns the amount of stack space needed for parameter passing
932 Note: this function allocated an array in plan->pplans with tcc_malloc. It
933 is the responsibility of the caller to free this array once used (ie not
934 before copy_params). */
935 static int assign_regs(int nb_args
, int corefloat
, struct plan
*plan
, int *todo
)
938 int ncrn
/* next core register number */, nsaa
/* next stacked argument address*/;
940 struct param_plan pplan
;
941 struct avail_regs avregs
= AVAIL_REGS_INITIALIZER
;
945 plan
->pplans
= tcc_malloc(nb_args
* sizeof(*plan
->pplans
));
946 memset(plan
->clsplans
, 0, sizeof(plan
->clsplans
));
947 for(i
= nb_args
; i
-- ;) {
948 int j
, start_vfpreg
= 0;
949 size
= type_size(&vtop
[-i
].type
, &align
);
950 switch(vtop
[-i
].type
.t
& VT_BTYPE
) {
956 int is_hfa
= 0; /* Homogeneous float aggregate */
958 if (is_float(vtop
[-i
].type
.t
)
959 || (is_hfa
= is_hgen_float_aggr(&vtop
[-i
].type
))) {
962 start_vfpreg
= assign_vfpreg(&avregs
, align
, size
);
963 end_vfpreg
= start_vfpreg
+ ((size
- 1) >> 2);
964 if (start_vfpreg
>= 0) {
965 pplan
= (struct param_plan
) {start_vfpreg
, end_vfpreg
, &vtop
[-i
]};
967 add_param_plan(plan
, pplan
, VFP_STRUCT_CLASS
);
969 add_param_plan(plan
, pplan
, VFP_CLASS
);
975 ncrn
= (ncrn
+ (align
-1)/4) & -(align
/4);
976 size
= (size
+ 3) & -4;
977 if (ncrn
+ size
/4 <= 4 || (ncrn
< 4 && start_vfpreg
!= -1)) {
978 /* The parameter is allocated both in core register and on stack. As
979 * such, it can be of either class: it would either be the last of
980 * CORE_STRUCT_CLASS or the first of STACK_CLASS. */
981 for (j
= ncrn
; j
< 4 && j
< ncrn
+ size
/ 4; j
++)
983 pplan
= (struct param_plan
) {ncrn
, j
, &vtop
[-i
]};
984 add_param_plan(plan
, pplan
, CORE_STRUCT_CLASS
);
987 nsaa
= (ncrn
- 4) * 4;
995 int is_long
= (vtop
[-i
].type
.t
& VT_BTYPE
) == VT_LLONG
;
998 ncrn
= (ncrn
+ 1) & -2;
1002 pplan
= (struct param_plan
) {ncrn
, ncrn
, &vtop
[-i
]};
1006 add_param_plan(plan
, pplan
, CORE_CLASS
);
1010 nsaa
= (nsaa
+ (align
- 1)) & ~(align
- 1);
1011 pplan
= (struct param_plan
) {nsaa
, nsaa
+ size
, &vtop
[-i
]};
1012 add_param_plan(plan
, pplan
, STACK_CLASS
);
1013 nsaa
+= size
; /* size already rounded up before */
1018 #undef add_param_plan
1020 /* Copy parameters to their final destination (core reg, VFP reg or stack) for
1023 nb_args: number of parameters the function take
1024 plan: the overall assignment plan for parameters
1025 todo: a bitmap indicating what core reg will hold a parameter
1027 Returns the number of SValue added by this function on the value stack */
1028 static int copy_params(int nb_args
, struct plan
*plan
, int todo
)
1030 int size
, align
, r
, i
, nb_extra_sval
= 0;
1031 struct param_plan
*pplan
;
1033 /* Several constraints require parameters to be copied in a specific order:
1034 - structures are copied to the stack before being loaded in a reg;
1035 - floats loaded to an odd numbered VFP reg are first copied to the
1036 preceding even numbered VFP reg and then moved to the next VFP reg.
1038 It is thus important that:
1039 - structures assigned to core regs must be copied after parameters
1040 assigned to the stack but before structures assigned to VFP regs because
1041 a structure can lie partly in core registers and partly on the stack;
1042 - parameters assigned to the stack and all structures be copied before
1043 parameters assigned to a core reg since copying a parameter to the stack
1044 require using a core reg;
1045 - parameters assigned to VFP regs be copied before structures assigned to
1046 VFP regs as the copy might use an even numbered VFP reg that already
1047 holds part of a structure. */
1048 for(i
= 0; i
< NB_CLASSES
; i
++) {
1049 for(pplan
= plan
->clsplans
[i
]; pplan
; pplan
= pplan
->prev
) {
1050 vpushv(pplan
->sval
);
1051 pplan
->sval
->r
= pplan
->sval
->r2
= VT_CONST
; /* disable entry */
1054 case CORE_STRUCT_CLASS
:
1055 case VFP_STRUCT_CLASS
:
1056 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_STRUCT
) {
1058 size
= type_size(&pplan
->sval
->type
, &align
);
1059 /* align to stack align size */
1060 size
= (size
+ 3) & ~3;
1061 if (i
== STACK_CLASS
&& pplan
->prev
)
1062 padding
= pplan
->start
- pplan
->prev
->end
;
1063 size
+= padding
; /* Add padding if any */
1064 /* allocate the necessary size on stack */
1066 /* generate structure store */
1067 r
= get_reg(RC_INT
);
1068 o(0xE28D0000|(intr(r
)<<12)|padding
); /* add r, sp, padding */
1069 vset(&vtop
->type
, r
| VT_LVAL
, 0);
1071 vstore(); /* memcpy to current sp + potential padding */
1073 /* Homogeneous float aggregate are loaded to VFP registers
1074 immediately since there is no way of loading data in multiple
1075 non consecutive VFP registers as what is done for other
1076 structures (see the use of todo). */
1077 if (i
== VFP_STRUCT_CLASS
) {
1078 int first
= pplan
->start
, nb
= pplan
->end
- first
+ 1;
1079 /* vpop.32 {pplan->start, ..., pplan->end} */
1080 o(0xECBD0A00|(first
&1)<<22|(first
>>1)<<12|nb
);
1081 /* No need to write the register used to a SValue since VFP regs
1082 cannot be used for gcall_or_jmp */
1085 if (is_float(pplan
->sval
->type
.t
)) {
1087 r
= vfpr(gv(RC_FLOAT
)) << 12;
1088 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_FLOAT
)
1092 r
|= 0x101; /* vpush.32 -> vpush.64 */
1094 o(0xED2D0A01 + r
); /* vpush */
1096 r
= fpr(gv(RC_FLOAT
)) << 12;
1097 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_FLOAT
)
1099 else if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_DOUBLE
)
1102 size
= LDOUBLE_SIZE
;
1109 o(0xED2D0100|r
|(size
>>2)); /* some kind of vpush for FPA */
1112 /* simple type (currently always same size) */
1113 /* XXX: implicit cast ? */
1115 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1119 o(0xE52D0004|(intr(r
)<<12)); /* push r */
1123 o(0xE52D0004|(intr(r
)<<12)); /* push r */
1125 if (i
== STACK_CLASS
&& pplan
->prev
)
1126 gadd_sp(pplan
->prev
->end
- pplan
->start
); /* Add padding if any */
1131 gv(regmask(TREG_F0
+ (pplan
->start
>> 1)));
1132 if (pplan
->start
& 1) { /* Must be in upper part of double register */
1133 o(0xEEF00A40|((pplan
->start
>>1)<<12)|(pplan
->start
>>1)); /* vmov.f32 s(n+1), sn */
1134 vtop
->r
= VT_CONST
; /* avoid being saved on stack by gv for next float */
1139 if ((pplan
->sval
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1141 gv(regmask(pplan
->end
));
1142 pplan
->sval
->r2
= vtop
->r
;
1145 gv(regmask(pplan
->start
));
1146 /* Mark register as used so that gcall_or_jmp use another one
1147 (regs >=4 are free as never used to pass parameters) */
1148 pplan
->sval
->r
= vtop
->r
;
1155 /* Manually free remaining registers since next parameters are loaded
1156 * manually, without the help of gv(int). */
1160 o(0xE8BD0000|todo
); /* pop {todo} */
1161 for(pplan
= plan
->clsplans
[CORE_STRUCT_CLASS
]; pplan
; pplan
= pplan
->prev
) {
1163 pplan
->sval
->r
= pplan
->start
;
1164 /* An SValue can only pin 2 registers at best (r and r2) but a structure
1165 can occupy more than 2 registers. Thus, we need to push on the value
1166 stack some fake parameter to have on SValue for each registers used
1167 by a structure (r2 is not used). */
1168 for (r
= pplan
->start
+ 1; r
<= pplan
->end
; r
++) {
1169 if (todo
& (1 << r
)) {
1177 return nb_extra_sval
;
1180 /* Generate function call. The function address is pushed first, then
1181 all the parameters in call order. This functions pops all the
1182 parameters and the function address. */
1183 void gfunc_call(int nb_args
)
1186 int variadic
, corefloat
= 1;
1191 if (float_abi
== ARM_HARD_FLOAT
) {
1192 variadic
= (vtop
[-nb_args
].type
.ref
->c
== FUNC_ELLIPSIS
);
1193 corefloat
= variadic
|| floats_in_core_regs(&vtop
[-nb_args
]);
1196 /* cannot let cpu flags if other instruction are generated. Also avoid leaving
1197 VT_JMP anywhere except on the top of the stack because it would complicate
1198 the code generator. */
1199 r
= vtop
->r
& VT_VALMASK
;
1200 if (r
== VT_CMP
|| (r
& ~1) == VT_JMP
)
1203 args_size
= assign_regs(nb_args
, corefloat
, &plan
, &todo
);
1206 if (args_size
& 7) { /* Stack must be 8 byte aligned at fct call for EABI */
1207 args_size
= (args_size
+ 7) & ~7;
1208 o(0xE24DD004); /* sub sp, sp, #4 */
1212 nb_args
+= copy_params(nb_args
, &plan
, todo
);
1213 tcc_free(plan
.pplans
);
1215 /* Move fct SValue on top as required by gcall_or_jmp */
1219 gadd_sp(args_size
); /* pop all parameters passed on the stack */
1220 #if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
1221 if(float_abi
== ARM_SOFTFP_FLOAT
&& corefloat
&&
1222 is_float(vtop
->type
.ref
->type
.t
)) {
1223 if((vtop
->type
.ref
->type
.t
& VT_BTYPE
) == VT_FLOAT
) {
1224 o(0xEE000A10); /*vmov s0, r0 */
1226 o(0xEE000B10); /* vmov.32 d0[0], r0 */
1227 o(0xEE201B10); /* vmov.32 d0[1], r1 */
1231 vtop
-= nb_args
+ 1; /* Pop all params and fct address from value stack */
1232 leaffunc
= 0; /* we are calling a function, so we aren't in a leaf function */
1235 /* generate function prolog of type 't' */
1236 void gfunc_prolog(CType
*func_type
)
1239 int n
, nf
, size
, align
, struct_ret
= 0;
1240 int addr
, pn
, sn
; /* pn=core, sn=stack */
1241 struct avail_regs avregs
= AVAIL_REGS_INITIALIZER
;
1244 sym
= func_type
->ref
;
1245 func_vt
= sym
->type
;
1246 func_var
= (func_type
->ref
->c
== FUNC_ELLIPSIS
);
1249 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
&&
1250 !gfunc_sret(&func_vt
, func_var
, &ret_type
, &align
))
1254 func_vc
= 12; /* Offset from fp of the place to store the result */
1256 for(sym2
= sym
->next
; sym2
&& (n
< 4 || nf
< 16); sym2
= sym2
->next
) {
1257 size
= type_size(&sym2
->type
, &align
);
1259 if (float_abi
== ARM_HARD_FLOAT
&& !func_var
&&
1260 (is_float(sym2
->type
.t
) || is_hgen_float_aggr(&sym2
->type
))) {
1261 int tmpnf
= assign_vfpreg(&avregs
, align
, size
);
1262 tmpnf
+= (size
+ 3) / 4;
1263 nf
= (tmpnf
> nf
) ? tmpnf
: nf
;
1267 n
+= (size
+ 3) / 4;
1269 o(0xE1A0C00D); /* mov ip,sp */
1278 o(0xE92D0000|((1<<n
)-1)); /* save r0-r4 on stack if needed */
1283 nf
=(nf
+1)&-2; /* nf => HARDFLOAT => EABI */
1284 o(0xED2D0A00|nf
); /* save s0-s15 on stack if needed */
1286 o(0xE92D5800); /* save fp, ip, lr */
1287 o(0xE1A0B00D); /* mov fp, sp */
1288 func_sub_sp_offset
= ind
;
1289 o(0xE1A00000); /* nop, leave space for stack adjustment in epilog */
1292 if (float_abi
== ARM_HARD_FLOAT
) {
1294 avregs
= AVAIL_REGS_INITIALIZER
;
1297 pn
= struct_ret
, sn
= 0;
1298 while ((sym
= sym
->next
)) {
1301 size
= type_size(type
, &align
);
1302 size
= (size
+ 3) >> 2;
1303 align
= (align
+ 3) & ~3;
1305 if (float_abi
== ARM_HARD_FLOAT
&& !func_var
&& (is_float(sym
->type
.t
)
1306 || is_hgen_float_aggr(&sym
->type
))) {
1307 int fpn
= assign_vfpreg(&avregs
, align
, size
<< 2);
1316 pn
= (pn
+ (align
-1)/4) & -(align
/4);
1318 addr
= (nf
+ pn
) * 4;
1325 sn
= (sn
+ (align
-1)/4) & -(align
/4);
1327 addr
= (n
+ nf
+ sn
) * 4;
1330 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| lvalue_type(type
->t
),
1338 /* generate function epilog */
1339 void gfunc_epilog(void)
1343 /* Copy float return value to core register if base standard is used and
1344 float computation is made with VFP */
1345 #if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
1346 if ((float_abi
== ARM_SOFTFP_FLOAT
|| func_var
) && is_float(func_vt
.t
)) {
1347 if((func_vt
.t
& VT_BTYPE
) == VT_FLOAT
)
1348 o(0xEE100A10); /* fmrs r0, s0 */
1350 o(0xEE100B10); /* fmrdl r0, d0 */
1351 o(0xEE301B10); /* fmrdh r1, d0 */
1355 o(0xE89BA800); /* restore fp, sp, pc */
1356 diff
= (-loc
+ 3) & -4;
1359 diff
= ((diff
+ 11) & -8) - 4;
1362 x
=stuff_const(0xE24BD000, diff
); /* sub sp,fp,# */
1364 *(uint32_t *)(cur_text_section
->data
+ func_sub_sp_offset
) = x
;
1368 o(0xE59FC004); /* ldr ip,[pc+4] */
1369 o(0xE04BD00C); /* sub sp,fp,ip */
1370 o(0xE1A0F00E); /* mov pc,lr */
1372 *(uint32_t *)(cur_text_section
->data
+ func_sub_sp_offset
) = 0xE1000000|encbranch(func_sub_sp_offset
,addr
,1);
1377 /* generate a jump to a label */
1382 o(0xE0000000|encbranch(r
,t
,1));
1386 /* generate a jump to a fixed address */
/* Emit an unconditional jump to the fixed code address 'a'. */
void gjmp_addr(int a)
{
    gjmp(a);
}
1392 /* generate a test. set 'inv' to invert test. Stack entry is popped */
1393 int gtst(int inv
, int t
)
1397 v
= vtop
->r
& VT_VALMASK
;
1400 op
=mapcc(inv
?negcc(vtop
->c
.i
):vtop
->c
.i
);
1401 op
|=encbranch(r
,t
,1);
1404 } else { /* VT_JMP || VT_JMPI */
1405 if ((v
& 1) == inv
) {
1414 p
= decbranch(lp
=p
);
1416 x
= (uint32_t *)(cur_text_section
->data
+ lp
);
1418 *x
|= encbranch(lp
,t
,1);
1431 /* generate an integer binary operation */
1432 void gen_opi(int op
)
1435 uint32_t opc
= 0, r
, fr
;
1436 unsigned short retreg
= REG_IRET
;
1444 case TOK_ADDC1
: /* add with carry generation */
1452 case TOK_SUBC1
: /* sub with carry generation */
1456 case TOK_ADDC2
: /* add with carry use */
1460 case TOK_SUBC2
: /* sub with carry use */
1477 gv2(RC_INT
, RC_INT
);
1481 o(0xE0000090|(intr(r
)<<16)|(intr(r
)<<8)|intr(fr
));
1506 func
=TOK___aeabi_idivmod
;
1515 func
=TOK___aeabi_uidivmod
;
1523 gv2(RC_INT
, RC_INT
);
1524 r
=intr(vtop
[-1].r2
=get_reg(RC_INT
));
1526 vtop
[-1].r
=get_reg_ex(RC_INT
,regmask(c
));
1528 o(0xE0800090|(r
<<16)|(intr(vtop
->r
)<<12)|(intr(c
)<<8)|intr(vtop
[1].r
));
1537 if((vtop
[-1].r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1538 if(opc
== 4 || opc
== 5 || opc
== 0xc) {
1540 opc
|=2; // sub -> rsb
1543 if ((vtop
->r
& VT_VALMASK
) == VT_CMP
||
1544 (vtop
->r
& (VT_VALMASK
& ~1)) == VT_JMP
)
1549 opc
=0xE0000000|(opc
<<20)|(c
<<16);
1550 if((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1552 x
=stuff_const(opc
|0x2000000,vtop
->c
.i
);
1554 r
=intr(vtop
[-1].r
=get_reg_ex(RC_INT
,regmask(vtop
[-1].r
)));
1559 fr
=intr(gv(RC_INT
));
1560 r
=intr(vtop
[-1].r
=get_reg_ex(RC_INT
,two2mask(vtop
->r
,vtop
[-1].r
)));
1564 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1570 opc
=0xE1A00000|(opc
<<5);
1571 if ((vtop
->r
& VT_VALMASK
) == VT_CMP
||
1572 (vtop
->r
& (VT_VALMASK
& ~1)) == VT_JMP
)
1578 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1579 fr
=intr(vtop
[-1].r
=get_reg_ex(RC_INT
,regmask(vtop
[-1].r
)));
1580 c
= vtop
->c
.i
& 0x1f;
1581 o(opc
|(c
<<7)|(fr
<<12));
1583 fr
=intr(gv(RC_INT
));
1584 c
=intr(vtop
[-1].r
=get_reg_ex(RC_INT
,two2mask(vtop
->r
,vtop
[-1].r
)));
1585 o(opc
|(c
<<12)|(fr
<<8)|0x10);
1590 vpush_global_sym(&func_old_type
, func
);
1597 tcc_error("gen_opi %i unimplemented!",op
);
1602 static int is_zero(int i
)
1604 if((vtop
[i
].r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) != VT_CONST
)
1606 if (vtop
[i
].type
.t
== VT_FLOAT
)
1607 return (vtop
[i
].c
.f
== 0.f
);
1608 else if (vtop
[i
].type
.t
== VT_DOUBLE
)
1609 return (vtop
[i
].c
.d
== 0.0);
1610 return (vtop
[i
].c
.ld
== 0.l
);
1613 /* generate a floating point operation 'v = t1 op t2' instruction. The
1614 * two operands are guaranteed to have the same floating point type */
1615 void gen_opf(int op
)
1619 x
=0xEE000A00|T2CPR(vtop
->type
.t
);
1637 x
|=0x810000; /* fsubX -> fnegX */
1650 if(op
< TOK_ULT
|| op
> TOK_GT
) {
1651 tcc_error("unknown fp op %x!",op
);
1657 case TOK_LT
: op
=TOK_GT
; break;
1658 case TOK_GE
: op
=TOK_ULE
; break;
1659 case TOK_LE
: op
=TOK_GE
; break;
1660 case TOK_GT
: op
=TOK_ULT
; break;
1663 x
|=0xB40040; /* fcmpX */
1664 if(op
!=TOK_EQ
&& op
!=TOK_NE
)
1665 x
|=0x80; /* fcmpX -> fcmpeX */
1668 o(x
|0x10000|(vfpr(gv(RC_FLOAT
))<<12)); /* fcmp(e)X -> fcmp(e)zX */
1670 x
|=vfpr(gv(RC_FLOAT
));
1672 o(x
|(vfpr(gv(RC_FLOAT
))<<12));
1675 o(0xEEF1FA10); /* fmstat */
1678 case TOK_LE
: op
=TOK_ULE
; break;
1679 case TOK_LT
: op
=TOK_ULT
; break;
1680 case TOK_UGE
: op
=TOK_GE
; break;
1681 case TOK_UGT
: op
=TOK_GT
; break;
1698 vtop
->r
=get_reg_ex(RC_FLOAT
,r
);
1701 o(x
|(vfpr(vtop
->r
)<<12));
1705 static uint32_t is_fconst()
1709 if((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) != VT_CONST
)
1711 if (vtop
->type
.t
== VT_FLOAT
)
1713 else if (vtop
->type
.t
== VT_DOUBLE
)
1743 /* generate a floating point operation 'v = t1 op t2' instruction. The
1744 two operands are guaranteed to have the same floating point type */
1745 void gen_opf(int op
)
1747 uint32_t x
, r
, r2
, c1
, c2
;
1748 //fputs("gen_opf\n",stderr);
1754 #if LDOUBLE_SIZE == 8
1755 if ((vtop
->type
.t
& VT_BTYPE
) != VT_FLOAT
)
1758 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
)
1760 else if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
)
1771 r
=fpr(gv(RC_FLOAT
));
1778 r2
=fpr(gv(RC_FLOAT
));
1787 r
=fpr(gv(RC_FLOAT
));
1789 } else if(c1
&& c1
<=0xf) {
1792 r
=fpr(gv(RC_FLOAT
));
1797 r
=fpr(gv(RC_FLOAT
));
1799 r2
=fpr(gv(RC_FLOAT
));
1808 r
=fpr(gv(RC_FLOAT
));
1813 r2
=fpr(gv(RC_FLOAT
));
1821 r
=fpr(gv(RC_FLOAT
));
1823 } else if(c1
&& c1
<=0xf) {
1826 r
=fpr(gv(RC_FLOAT
));
1831 r
=fpr(gv(RC_FLOAT
));
1833 r2
=fpr(gv(RC_FLOAT
));
1837 if(op
>= TOK_ULT
&& op
<= TOK_GT
) {
1838 x
|=0xd0f110; // cmfe
1839 /* bug (intention?) in Linux FPU emulator
1840 doesn't set carry if equal */
1846 tcc_error("unsigned comparision on floats?");
1852 op
=TOK_ULE
; /* correct in unordered case only if AC bit in FPSR set */
1856 x
&=~0x400000; // cmfe -> cmf
1878 r
=fpr(gv(RC_FLOAT
));
1885 r2
=fpr(gv(RC_FLOAT
));
1887 vtop
[-1].r
= VT_CMP
;
1890 tcc_error("unknown fp op %x!",op
);
1894 if(vtop
[-1].r
== VT_CMP
)
1900 vtop
[-1].r
=get_reg_ex(RC_FLOAT
,two2mask(vtop
[-1].r
,c1
));
1904 o(x
|(r
<<16)|(c1
<<12)|r2
);
1908 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
1909 and 'long long' cases. */
1910 ST_FUNC
void gen_cvt_itof1(int t
)
1914 bt
=vtop
->type
.t
& VT_BTYPE
;
1915 if(bt
== VT_INT
|| bt
== VT_SHORT
|| bt
== VT_BYTE
) {
1921 r2
=vfpr(vtop
->r
=get_reg(RC_FLOAT
));
1922 o(0xEE000A10|(r
<<12)|(r2
<<16)); /* fmsr */
1924 if(!(vtop
->type
.t
& VT_UNSIGNED
))
1925 r2
|=0x80; /* fuitoX -> fsituX */
1926 o(0xEEB80A40|r2
|T2CPR(t
)); /* fYitoX*/
1928 r2
=fpr(vtop
->r
=get_reg(RC_FLOAT
));
1929 if((t
& VT_BTYPE
) != VT_FLOAT
)
1930 dsize
=0x80; /* flts -> fltd */
1931 o(0xEE000110|dsize
|(r2
<<16)|(r
<<12)); /* flts */
1932 if((vtop
->type
.t
& (VT_UNSIGNED
|VT_BTYPE
)) == (VT_UNSIGNED
|VT_INT
)) {
1934 o(0xE3500000|(r
<<12)); /* cmp */
1935 r
=fpr(get_reg(RC_FLOAT
));
1936 if(last_itod_magic
) {
1937 off
=ind
+8-last_itod_magic
;
1942 o(0xBD1F0100|(r
<<12)|off
); /* ldflts */
1944 o(0xEA000000); /* b */
1945 last_itod_magic
=ind
;
1946 o(0x4F800000); /* 4294967296.0f */
1948 o(0xBE000100|dsize
|(r2
<<16)|(r2
<<12)|r
); /* adflt */
1952 } else if(bt
== VT_LLONG
) {
1954 CType
*func_type
= 0;
1955 if((t
& VT_BTYPE
) == VT_FLOAT
) {
1956 func_type
= &func_float_type
;
1957 if(vtop
->type
.t
& VT_UNSIGNED
)
1958 func
=TOK___floatundisf
;
1960 func
=TOK___floatdisf
;
1961 #if LDOUBLE_SIZE != 8
1962 } else if((t
& VT_BTYPE
) == VT_LDOUBLE
) {
1963 func_type
= &func_ldouble_type
;
1964 if(vtop
->type
.t
& VT_UNSIGNED
)
1965 func
=TOK___floatundixf
;
1967 func
=TOK___floatdixf
;
1968 } else if((t
& VT_BTYPE
) == VT_DOUBLE
) {
1970 } else if((t
& VT_BTYPE
) == VT_DOUBLE
|| (t
& VT_BTYPE
) == VT_LDOUBLE
) {
1972 func_type
= &func_double_type
;
1973 if(vtop
->type
.t
& VT_UNSIGNED
)
1974 func
=TOK___floatundidf
;
1976 func
=TOK___floatdidf
;
1979 vpush_global_sym(func_type
, func
);
1987 tcc_error("unimplemented gen_cvt_itof %x!",vtop
->type
.t
);
1990 /* convert fp to int 't' type */
1991 void gen_cvt_ftoi(int t
)
1997 r2
=vtop
->type
.t
& VT_BTYPE
;
2000 r
=vfpr(gv(RC_FLOAT
));
2002 o(0xEEBC0AC0|(r
<<12)|r
|T2CPR(r2
)|u
); /* ftoXizY */
2003 r2
=intr(vtop
->r
=get_reg(RC_INT
));
2004 o(0xEE100A10|(r
<<16)|(r2
<<12));
2009 func
=TOK___fixunssfsi
;
2010 #if LDOUBLE_SIZE != 8
2011 else if(r2
== VT_LDOUBLE
)
2012 func
=TOK___fixunsxfsi
;
2013 else if(r2
== VT_DOUBLE
)
2015 else if(r2
== VT_LDOUBLE
|| r2
== VT_DOUBLE
)
2017 func
=TOK___fixunsdfsi
;
2019 r
=fpr(gv(RC_FLOAT
));
2020 r2
=intr(vtop
->r
=get_reg(RC_INT
));
2021 o(0xEE100170|(r2
<<12)|r
);
2025 } else if(t
== VT_LLONG
) { // unsigned handled in gen_cvt_ftoi1
2028 #if LDOUBLE_SIZE != 8
2029 else if(r2
== VT_LDOUBLE
)
2031 else if(r2
== VT_DOUBLE
)
2033 else if(r2
== VT_LDOUBLE
|| r2
== VT_DOUBLE
)
2038 vpush_global_sym(&func_old_type
, func
);
2043 vtop
->r2
= REG_LRET
;
2047 tcc_error("unimplemented gen_cvt_ftoi!");
2050 /* convert from one floating point type to another */
2051 void gen_cvt_ftof(int t
)
2054 if(((vtop
->type
.t
& VT_BTYPE
) == VT_FLOAT
) != ((t
& VT_BTYPE
) == VT_FLOAT
)) {
2055 uint32_t r
= vfpr(gv(RC_FLOAT
));
2056 o(0xEEB70AC0|(r
<<12)|r
|T2CPR(vtop
->type
.t
));
2059 /* all we have to do on i386 and FPA ARM is to put the float in a register */
2064 /* computed goto support */
2071 /* Save the stack pointer onto the stack and return the location of its address */
2072 ST_FUNC
void gen_vla_sp_save(int addr
) {
2073 tcc_error("variable length arrays unsupported for this target");
2076 /* Restore the SP from a location on the stack */
2077 ST_FUNC
void gen_vla_sp_restore(int addr
) {
2078 tcc_error("variable length arrays unsupported for this target");
2081 /* Subtract from the stack pointer, and push the resulting value onto the stack */
2082 ST_FUNC
void gen_vla_alloc(CType
*type
, int align
) {
2083 tcc_error("variable length arrays unsupported for this target");
2086 /* end of ARM code generator */
2087 /*************************************************************/
2089 /*************************************************************/