 * ARMv4 code generator for TCC
 * Copyright (c) 2003 Daniel Glöckner
 * Copyright (c) 2012 Thomas Preud'homme
 * Based on i386-gen.c by Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#ifdef TARGET_DEFS_ONLY
#if defined(TCC_ARM_EABI) && !defined(TCC_ARM_VFP)
#error "Currently TinyCC only supports float computation with VFP instructions"
/* number of available registers */
#ifndef TCC_CPU_VERSION
# define TCC_CPU_VERSION 5
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT   0x0001 /* generic integer register */
#define RC_FLOAT 0x0002 /* generic float register */
#define RC_IRET  RC_R0  /* function return: integer register */
#define RC_LRET  RC_R1  /* function return: second integer register */
#define RC_FRET  RC_F0  /* function return: float register */
/* pretty names for the registers */
#define T2CPR(t) (((t) & VT_BTYPE) != VT_FLOAT ? 0x100 : 0)
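/* Note on T2CPR: VFP single-precision instructions use coprocessor 10 and
   double-precision ones coprocessor 11; the cp_num field sits in bits 8-11 of
   coprocessor encodings, so ORing 0x100 into an opcode bumps CP10 (0xA00) to
   CP11 (0xB00) whenever the operand type is not plain float. */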
/* return registers for function */
#define REG_IRET TREG_R0 /* single word int return register */
#define REG_LRET TREG_R1 /* second word return register (for long long) */
#define REG_FRET TREG_F0 /* float return register */
#define TOK___divdi3  TOK___aeabi_ldivmod
#define TOK___moddi3  TOK___aeabi_ldivmod
#define TOK___udivdi3 TOK___aeabi_uldivmod
#define TOK___umoddi3 TOK___aeabi_uldivmod
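/* On ARM EABI a single runtime helper serves both 64-bit division and modulo:
   __aeabi_(u)ldivmod returns the quotient in r0/r1 and the remainder in r2/r3,
   which is why the four libgcc-style names above can all be aliased to the two
   EABI entry points. */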
/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS
/* defined if structures are passed as pointers. Otherwise structures
   are directly pushed on the stack. */
/* #define FUNC_STRUCT_PARAM_AS_PTR */
/* pointer size, in bytes */
/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE 8
#define LDOUBLE_SIZE 8
#define LDOUBLE_ALIGN 8
#define LDOUBLE_ALIGN 4
/* maximum alignment (for aligned attribute support) */
#define CHAR_IS_UNSIGNED
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
enum float_abi float_abi;
ST_DATA const int reg_classes[NB_REGS] = {
    /* r0 */  RC_INT | RC_R0,
    /* r1 */  RC_INT | RC_R1,
    /* r2 */  RC_INT | RC_R2,
    /* r3 */  RC_INT | RC_R3,
    /* r12 */ RC_INT | RC_R12,
    /* f0 */  RC_FLOAT | RC_F0,
    /* f1 */  RC_FLOAT | RC_F1,
    /* f2 */  RC_FLOAT | RC_F2,
    /* f3 */  RC_FLOAT | RC_F3,
    /* d4/s8 */  RC_FLOAT | RC_F4,
    /* d5/s10 */ RC_FLOAT | RC_F5,
    /* d6/s12 */ RC_FLOAT | RC_F6,
    /* d7/s14 */ RC_FLOAT | RC_F7,
static int func_sub_sp_offset, last_itod_magic;
#if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
static CType float_type, double_type, func_float_type, func_double_type;
ST_FUNC void arm_init(struct TCCState *s)
    float_type.t = VT_FLOAT;
    double_type.t = VT_DOUBLE;
    func_float_type.t = VT_FUNC;
    func_float_type.ref = sym_push(SYM_FIELD, &float_type, FUNC_CDECL, FUNC_OLD);
    func_double_type.t = VT_FUNC;
    func_double_type.ref = sym_push(SYM_FIELD, &double_type, FUNC_CDECL, FUNC_OLD);
    float_abi = s->float_abi;
#ifndef TCC_ARM_HARDFLOAT
    tcc_warning("soft float ABI currently not supported: default to softfp");
#define func_float_type func_old_type
#define func_double_type func_old_type
#define func_ldouble_type func_old_type
ST_FUNC void arm_init(struct TCCState *s)
#if !defined (TCC_ARM_VFP)
    tcc_warning("Support for FPA is deprecated and will be removed in next"
#if !defined (TCC_ARM_EABI)
    tcc_warning("Support for OABI is deprecated and will be removed in next"
static int two2mask(int a, int b) {
    return (reg_classes[a] | reg_classes[b]) & ~(RC_INT | RC_FLOAT);
static int regmask(int r) {
    return reg_classes[r] & ~(RC_INT | RC_FLOAT);
/******************************************************/
#if defined(TCC_ARM_EABI) && !defined(CONFIG_TCC_ELFINTERP)
const char *default_elfinterp(struct TCCState *s)
    if (s->float_abi == ARM_HARD_FLOAT)
        return "/lib/ld-linux-armhf.so.3";
    return "/lib/ld-linux.so.3";
/* this is a good place to start adding big-endian support */
    if (!cur_text_section)
        tcc_error("compiler error! This happens e.g. if the compiler\n"
                  "can't evaluate constant expressions outside of a function.");
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind++] = i & 255;
    cur_text_section->data[ind++] = i & 255;
    cur_text_section->data[ind++] = i & 255;
    cur_text_section->data[ind++] = i;
static uint32_t stuff_const(uint32_t op, uint32_t c)
    uint32_t nc = 0, negop = 0;
    case 0x1A00000: // mov
    case 0x1E00000: // mvn
            return (op & 0xF010F000) | ((op >> 16) & 0xF) | 0x1E00000;
            return (op & 0xF010F000) | ((op >> 16) & 0xF) | 0x1A00000;
    case 0x1C00000: // bic
    case 0x1800000: // orr
            return (op & 0xFFF0FFFF) | 0x1E00000;
        if (c < 256) /* catch undefined <<32 */
        m = (0xff >> i) | (0xff << (32 - i));
            return op | (i << 7) | (c << i) | (c >> (32 - i));
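/* An ARM data-processing immediate is an 8-bit value rotated right by an even
   amount (bits 11-8 hold rotate/2, bits 7-0 the value).  For example
   0xFF000000 is encodable as 0xFF rotated right by 8, i.e. operand field
   0x4FF, while a constant such as 0x12345678 has no such encoding and must be
   handled by stuff_const_harder() below. */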
void stuff_const_harder(uint32_t op, uint32_t v) {
    uint32_t a[16], nv, no, o2, n2;
    o2 = (op & 0xfff0ffff) | ((op & 0xf000) << 4);
        a[i] = (a[i-1] >> 2) | (a[i-1] << 30);
        for (j = i < 4 ? i + 12 : 15; j >= i + 4; j--)
            if ((v & (a[i] | a[j])) == v) {
                o(stuff_const(op, v & a[i]));
                o(stuff_const(o2, v & a[j]));
        for (j = i < 4 ? i + 12 : 15; j >= i + 4; j--)
            if ((nv & (a[i] | a[j])) == nv) {
                o(stuff_const(no, nv & a[i]));
                o(stuff_const(n2, nv & a[j]));
            for (k = i < 4 ? i + 12 : 15; k >= j + 4; k--)
                if ((v & (a[i] | a[j] | a[k])) == v) {
                    o(stuff_const(op, v & a[i]));
                    o(stuff_const(o2, v & a[j]));
                    o(stuff_const(o2, v & a[k]));
            for (k = i < 4 ? i + 12 : 15; k >= j + 4; k--)
                if ((nv & (a[i] | a[j] | a[k])) == nv) {
                    o(stuff_const(no, nv & a[i]));
                    o(stuff_const(n2, nv & a[j]));
                    o(stuff_const(n2, nv & a[k]));
    o(stuff_const(op, v & a[0]));
    o(stuff_const(o2, v & a[4]));
    o(stuff_const(o2, v & a[8]));
    o(stuff_const(o2, v & a[12]));
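/* Worst-case fallback above: any 32-bit value is the OR of its four bytes,
   and a single byte in any position is always a valid rotated immediate.
   For example 0x12345678 ends up roughly as 0x00000078 emitted with op,
   followed by 0x12000000, 0x00340000 and 0x00005600 emitted with the
   orr-style o2. */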
uint32_t encbranch(int pos, int addr, int fail)
    if (addr >= 0x1000000 || addr < -0x1000000) {
            tcc_error("FIXME: function bigger than 32MB");
    return 0x0A000000 | (addr & 0xffffff);
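/* B/BL instructions hold a signed 24-bit word offset relative to PC+8, so a
   branch can only reach about +/-32MB, hence the error above.  For example a
   branch at 0x1000 targeting 0x2000 uses the word offset
   (0x2000 - 0x1000 - 8) / 4 = 0x3FE, giving 0xEA0003FE for an unconditional B. */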
int decbranch(int pos)
    x = *(uint32_t *)(cur_text_section->data + pos);
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
        x = (uint32_t *)(cur_text_section->data + t);
            *x = 0xE1A00000; // nop
            *x |= encbranch(lt, a, 1);
static uint32_t vfpr(int r)
    if (r < TREG_F0 || r > TREG_F7)
        tcc_error("compiler error! register %i is no vfp register", r);
static uint32_t fpr(int r)
    if (r < TREG_F0 || r > TREG_F3)
        tcc_error("compiler error! register %i is no fpa register", r);
static uint32_t intr(int r)
    if (r >= TREG_R0 && r <= TREG_R3)
    if (r >= TREG_SP && r <= TREG_LR)
        return r + (13 - TREG_SP);
    tcc_error("compiler error! register %i is no int register", r);
static void calcaddr(uint32_t *base, int *off, int *sgn, int maxoff, unsigned shift)
    if (*off > maxoff || *off & ((1 << shift) - 1)) {
        y = stuff_const(x, *off & ~maxoff);
            y = stuff_const(x, (*off + maxoff) & ~maxoff);
                *off = ((*off + maxoff) & ~maxoff) - *off;
        stuff_const_harder(x, *off & ~maxoff);
static uint32_t mapcc(int cc)
        return 0x30000000; /* CC/LO */
        return 0x20000000; /* CS/HS */
        return 0x00000000; /* EQ */
        return 0x10000000; /* NE */
        return 0x90000000; /* LS */
        return 0x80000000; /* HI */
        return 0x40000000; /* MI */
        return 0x50000000; /* PL */
        return 0xB0000000; /* LT */
        return 0xA0000000; /* GE */
        return 0xD0000000; /* LE */
        return 0xC0000000; /* GT */
    tcc_error("unexpected condition code");
    return 0xE0000000; /* AL */
static int negcc(int cc)
    tcc_error("unexpected condition code");
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
    int v, ft, fc, fr, sign;
        uint32_t base = 0xB; // fp
            v1.r = VT_LOCAL | VT_LVAL;
        } else if (v == VT_CONST) {
        } else if (v < VT_CONST) {
            calcaddr(&base, &fc, &sign, 1020, 2);
            op = 0xED100A00; /* flds */
            if ((ft & VT_BTYPE) != VT_FLOAT)
                op |= 0x100; /* flds -> fldd */
            o(op | (vfpr(r) << 12) | (fc >> 2) | (base << 16));
#if LDOUBLE_SIZE == 8
            if ((ft & VT_BTYPE) != VT_FLOAT)
            if ((ft & VT_BTYPE) == VT_DOUBLE)
            else if ((ft & VT_BTYPE) == VT_LDOUBLE)
            o(op | (fpr(r) << 12) | (fc >> 2) | (base << 16));
        } else if ((ft & (VT_BTYPE | VT_UNSIGNED)) == VT_BYTE
                   || (ft & VT_BTYPE) == VT_SHORT) {
            calcaddr(&base, &fc, &sign, 255, 0);
            if ((ft & VT_BTYPE) == VT_SHORT)
            if ((ft & VT_UNSIGNED) == 0)
            o(op | (intr(r) << 12) | (base << 16) | ((fc & 0xf0) << 4) | (fc & 0xf));
            calcaddr(&base, &fc, &sign, 4095, 0);
            if ((ft & VT_BTYPE) == VT_BYTE || (ft & VT_BTYPE) == VT_BOOL)
            o(op | (intr(r) << 12) | fc | (base << 16));
        op = stuff_const(0xE3A00000 | (intr(r) << 12), sv->c.i);
        if (fr & VT_SYM || !op) {
            o(0xE59F0000 | (intr(r) << 12));
                greloc(cur_text_section, sv->sym, ind, R_ARM_ABS32);
    } else if (v == VT_LOCAL) {
        op = stuff_const(0xE28B0000 | (intr(r) << 12), sv->c.i);
        if (fr & VT_SYM || !op) {
            o(0xE59F0000 | (intr(r) << 12));
            if (fr & VT_SYM) // needed ?
                greloc(cur_text_section, sv->sym, ind, R_ARM_ABS32);
            o(0xE08B0000 | (intr(r) << 12) | intr(r));
    } else if (v == VT_CMP) {
        o(mapcc(sv->c.i) | 0x3A00001 | (intr(r) << 12));
        o(mapcc(negcc(sv->c.i)) | 0x3A00000 | (intr(r) << 12));
    } else if (v == VT_JMP || v == VT_JMPI) {
        o(0xE3A00000 | (intr(r) << 12) | t);
        o(0xE3A00000 | (intr(r) << 12) | (t ^ 1));
    } else if (v < VT_CONST) {
            o(0xEEB00A40 | (vfpr(r) << 12) | vfpr(v) | T2CPR(ft)); /* fcpyX */
            o(0xEE008180 | (fpr(r) << 12) | fpr(v));
            o(0xE1A00000 | (intr(r) << 12) | intr(v));
    tcc_error("load unimplemented!");
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *sv)
    int v, ft, fc, fr, sign;
    if (fr & VT_LVAL || fr == VT_LOCAL) {
        uint32_t base = 0xb; /* fp */
    } else if (v == VT_CONST) {
            calcaddr(&base, &fc, &sign, 1020, 2);
            op = 0xED000A00; /* fsts */
            if ((ft & VT_BTYPE) != VT_FLOAT)
                op |= 0x100; /* fsts -> fstd */
            o(op | (vfpr(r) << 12) | (fc >> 2) | (base << 16));
#if LDOUBLE_SIZE == 8
            if ((ft & VT_BTYPE) != VT_FLOAT)
            if ((ft & VT_BTYPE) == VT_DOUBLE)
            if ((ft & VT_BTYPE) == VT_LDOUBLE)
            o(op | (fpr(r) << 12) | (fc >> 2) | (base << 16));
        } else if ((ft & VT_BTYPE) == VT_SHORT) {
            calcaddr(&base, &fc, &sign, 255, 0);
            o(op | (intr(r) << 12) | (base << 16) | ((fc & 0xf0) << 4) | (fc & 0xf));
            calcaddr(&base, &fc, &sign, 4095, 0);
            if ((ft & VT_BTYPE) == VT_BYTE || (ft & VT_BTYPE) == VT_BOOL)
            o(op | (intr(r) << 12) | fc | (base << 16));
    tcc_error("store unimplemented");
static void gadd_sp(int val)
    stuff_const_harder(0xE28DD000, val);
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        if (vtop->r & VT_SYM) {
            x = encbranch(ind, ind + vtop->c.i, 0);
                /* relocation case */
                greloc(cur_text_section, vtop->sym, ind, R_ARM_PC24);
                o(x | (is_jmp ? 0xE0000000 : 0xE1000000));
                o(0xE28FE004); // add lr,pc,#4
                o(0xE51FF004); // ldr pc,[pc,#-4]
                greloc(cur_text_section, vtop->sym, ind, R_ARM_ABS32);
            o(0xE28FE004); // add lr,pc,#4
            o(0xE51FF004); // ldr pc,[pc,#-4]
    /* otherwise, indirect call */
        o(0xE1A0E00F); // mov lr,pc
        o(0xE1A0F000 | intr(r)); // mov pc,r
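/* The add lr,pc,#4 / ldr pc,[pc,#-4] pair used above works because PC reads
   as the current instruction + 8: lr receives the address just past the
   32-bit literal that follows the ldr, and the ldr fetches that literal into
   pc, giving a full 32-bit call with a proper return address. */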
static int unalias_ldbl(int btype)
#if LDOUBLE_SIZE == 8
    if (btype == VT_LDOUBLE)
/* Return whether a structure is a homogeneous float aggregate or not.
   The answer is true if all the elements of the structure are of the same
   primitive float type and there are no more than 4 elements.

   type: the type corresponding to the structure to be tested */
static int is_hgen_float_aggr(CType *type)
    if ((type->t & VT_BTYPE) == VT_STRUCT) {
        int btype, nb_fields = 0;
        ref = type->ref->next;
        btype = unalias_ldbl(ref->type.t & VT_BTYPE);
        if (btype == VT_FLOAT || btype == VT_DOUBLE) {
            for (; ref && btype == unalias_ldbl(ref->type.t & VT_BTYPE); ref = ref->next, nb_fields++);
            return !ref && nb_fields <= 4;
    signed char avail[3]; /* 3 holes max with only float and double alignments */
    int first_hole; /* first available hole */
    int last_hole; /* last available hole (none if equal to first_hole) */
    int first_free_reg; /* next free register in the sequence, hole excluded */
#define AVAIL_REGS_INITIALIZER (struct avail_regs) { { 0, 0, 0}, 0, 0, 0 }
/* Find suitable registers for a VFP Co-Processor Register Candidate (VFP CPRC
   param) according to the rules described in the procedure call standard for
   the ARM architecture (AAPCS). If found, the registers are assigned to this
   VFP CPRC parameter. Registers are allocated in sequence unless a hole exists
   and the parameter is a single float.

   avregs: opaque structure to keep track of available VFP co-processor regs
   align: alignment constraints for the param, as returned by type_size()
   size: size of the parameter, as returned by type_size() */
int assign_vfpreg(struct avail_regs *avregs, int align, int size)
    if (avregs->first_free_reg == -1)
    if (align >> 3) { /* double alignment */
        first_reg = avregs->first_free_reg;
        /* alignment constraint not respected so use next reg and record hole */
            avregs->avail[avregs->last_hole++] = first_reg++;
    } else { /* no special alignment (float or array of float) */
        /* if single float and a hole is available, assign the param to it */
        if (size == 4 && avregs->first_hole != avregs->last_hole)
            return avregs->avail[avregs->first_hole++];
            first_reg = avregs->first_free_reg;
    if (first_reg + size / 4 <= 16) {
        avregs->first_free_reg = first_reg + size / 4;
        avregs->first_free_reg = -1;
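/* Example of the back-filling rule above, for a hard-float call such as
   f(float a, double b, float c): 'a' goes to s0; 'b' needs an even register,
   so s1 is recorded as a hole and 'b' takes s2/s3 (d1); 'c' then back-fills
   the hole and ends up in s1.  Registers are counted here in units of
   single-precision s registers, with s0-s15 available for parameters. */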
/* Returns whether all params need to be passed in core registers or not.
   This is the case for functions that are part of the runtime ABI. */
int floats_in_core_regs(SValue *sval)
    switch (sval->sym->v) {
    case TOK___floatundisf:
    case TOK___floatundidf:
    case TOK___fixunssfdi:
    case TOK___fixunsdfdi:
    case TOK___fixunsxfdi:
    case TOK___floatdisf:
    case TOK___floatdidf:
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize) {
    size = type_size(vt, &align);
    if (float_abi == ARM_HARD_FLOAT && !variadic &&
        (is_float(vt->t) || is_hgen_float_aggr(vt))) {
        return (size + 7) >> 3;
    } else if (size <= 4) {
/* Parameters are classified according to how they are copied to their final
   destination for the function call. Because the copying is performed class
   after class according to the order in the union below, it is important that
   some constraints about the order of the members of this union are respected:
   - CORE_STRUCT_CLASS must come after STACK_CLASS;
   - CORE_CLASS must come after STACK_CLASS, CORE_STRUCT_CLASS and
     VFP_STRUCT_CLASS;
   - VFP_STRUCT_CLASS must come after VFP_CLASS.
   See the comment for the main loop in copy_params() for the reason. */
    int start; /* first reg or addr used depending on the class */
    int end;   /* last reg used or next free addr depending on the class */
    SValue *sval; /* pointer to SValue on the value stack */
    struct param_plan *prev; /* previous element in this class */
    struct param_plan *pplans; /* array of all the param plans */
    struct param_plan *clsplans[NB_CLASSES]; /* per class lists of param plans */
#define add_param_plan(plan,pplan,class)                        \
    pplan.prev = plan->clsplans[class];                         \
    plan->pplans[plan ## _nb] = pplan;                          \
    plan->clsplans[class] = &plan->pplans[plan ## _nb++];       \
/* Assign parameters to registers and stack with alignment according to the
   rules in the procedure call standard for the ARM architecture (AAPCS).
   The overall assignment is recorded in an array of per parameter structures
   called parameter plans. The parameter plans are also further organized in a
   number of linked lists, one per class of parameter (see the comment for the
   definition of union reg_class).

   nb_args: number of parameters of the function for which a call is generated
   float_abi: float ABI in use for this function call
   plan: the structure where the overall assignment is recorded
   todo: a bitmap that records which core registers hold a parameter

   Returns the amount of stack space needed for parameter passing

   Note: this function allocates an array in plan->pplans with tcc_malloc. It
   is the responsibility of the caller to free this array once used (i.e. not
   before copy_params). */
static int assign_regs(int nb_args, int float_abi, struct plan *plan, int *todo)
    int ncrn /* next core register number */, nsaa /* next stacked argument address */;
    struct param_plan pplan;
    struct avail_regs avregs = AVAIL_REGS_INITIALIZER;
    plan->pplans = tcc_malloc(nb_args * sizeof(*plan->pplans));
    memset(plan->clsplans, 0, sizeof(plan->clsplans));
    for (i = nb_args; i-- ;) {
        int j, start_vfpreg = 0;
        CType type = vtop[-i].type;
        size = type_size(&type, &align);
        size = (size + 3) & ~3;
        align = (align + 3) & ~3;
        switch (vtop[-i].type.t & VT_BTYPE) {
            if (float_abi == ARM_HARD_FLOAT) {
                int is_hfa = 0; /* Homogeneous float aggregate */
                if (is_float(vtop[-i].type.t)
                    || (is_hfa = is_hgen_float_aggr(&vtop[-i].type))) {
                    start_vfpreg = assign_vfpreg(&avregs, align, size);
                    end_vfpreg = start_vfpreg + ((size - 1) >> 2);
                    if (start_vfpreg >= 0) {
                        pplan = (struct param_plan) {start_vfpreg, end_vfpreg, &vtop[-i]};
                            add_param_plan(plan, pplan, VFP_STRUCT_CLASS);
                            add_param_plan(plan, pplan, VFP_CLASS);
            ncrn = (ncrn + (align-1)/4) & ~((align/4) - 1);
            if (ncrn + size/4 <= 4 || (ncrn < 4 && start_vfpreg != -1)) {
                /* The parameter is allocated both in core register and on stack. As
                 * such, it can be of either class: it would either be the last of
                 * CORE_STRUCT_CLASS or the first of STACK_CLASS. */
                for (j = ncrn; j < 4 && j < ncrn + size / 4; j++)
                pplan = (struct param_plan) {ncrn, j, &vtop[-i]};
                add_param_plan(plan, pplan, CORE_STRUCT_CLASS);
                nsaa = (ncrn - 4) * 4;
            int is_long = (vtop[-i].type.t & VT_BTYPE) == VT_LLONG;
                ncrn = (ncrn + 1) & -2;
                pplan = (struct param_plan) {ncrn, ncrn, &vtop[-i]};
                add_param_plan(plan, pplan, CORE_CLASS);
        nsaa = (nsaa + (align - 1)) & ~(align - 1);
        pplan = (struct param_plan) {nsaa, nsaa + size, &vtop[-i]};
        add_param_plan(plan, pplan, STACK_CLASS);
        nsaa += size; /* size already rounded up before */
#undef add_param_plan
/* Copy parameters to their final destination (core reg, VFP reg or stack)
   for the function call.

   nb_args: number of parameters the function takes
   plan: the overall assignment plan for parameters
   todo: a bitmap indicating what core reg will hold a parameter

   Returns the number of SValue added by this function on the value stack */
static int copy_params(int nb_args, struct plan *plan, int todo)
    int size, align, r, i, nb_extra_sval = 0;
    struct param_plan *pplan;
    /* Several constraints require parameters to be copied in a specific order:
        - structures are copied to the stack before being loaded in a reg;
        - floats loaded to an odd numbered VFP reg are first copied to the
          preceding even numbered VFP reg and then moved to the next VFP reg.

       It is thus important that:
        - structures assigned to core regs must be copied after parameters
          assigned to the stack but before structures assigned to VFP regs because
          a structure can lie partly in core registers and partly on the stack;
        - parameters assigned to the stack and all structures be copied before
          parameters assigned to a core reg since copying a parameter to the stack
          requires using a core reg;
        - parameters assigned to VFP regs be copied before structures assigned to
          VFP regs as the copy might use an even numbered VFP reg that already
          holds part of a structure. */
    for (i = 0; i < NB_CLASSES; i++) {
        for (pplan = plan->clsplans[i]; pplan; pplan = pplan->prev) {
                && (i != CORE_CLASS || pplan->sval->r < VT_CONST))
                vpushv(pplan->sval);
            pplan->sval->r = pplan->sval->r2 = VT_CONST; /* disable entry */
            case CORE_STRUCT_CLASS:
            case VFP_STRUCT_CLASS:
                if ((pplan->sval->type.t & VT_BTYPE) == VT_STRUCT) {
                    size = type_size(&pplan->sval->type, &align);
                    /* align to stack align size */
                    size = (size + 3) & ~3;
                    if (i == STACK_CLASS && pplan->prev)
                        padding = pplan->start - pplan->prev->end;
                    size += padding; /* Add padding if any */
                    /* allocate the necessary size on stack */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    o(0xE28D0000 | (intr(r) << 12) | padding); /* add r, sp, padding */
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vstore(); /* memcpy to current sp + potential padding */
                    /* Homogeneous float aggregates are loaded to VFP registers
                       immediately since there is no way of loading data in multiple
                       non consecutive VFP registers the way it is done for other
                       structures (see the use of todo). */
                    if (i == VFP_STRUCT_CLASS) {
                        int first = pplan->start, nb = pplan->end - first + 1;
                        /* vpop.32 {pplan->start, ..., pplan->end} */
                        o(0xECBD0A00 | (first & 1) << 22 | (first >> 1) << 12 | nb);
                        /* No need to write the register used to a SValue since VFP regs
                           cannot be used for gcall_or_jmp */
                if (is_float(pplan->sval->type.t)) {
                    r = vfpr(gv(RC_FLOAT)) << 12;
                    if ((pplan->sval->type.t & VT_BTYPE) == VT_FLOAT)
                        r |= 0x101; /* vpush.32 -> vpush.64 */
                    o(0xED2D0A01 + r); /* vpush */
                    r = fpr(gv(RC_FLOAT)) << 12;
                    if ((pplan->sval->type.t & VT_BTYPE) == VT_FLOAT)
                    else if ((pplan->sval->type.t & VT_BTYPE) == VT_DOUBLE)
                        size = LDOUBLE_SIZE;
                    o(0xED2D0100 | r | (size >> 2)); /* some kind of vpush for FPA */
                /* simple type (currently always same size) */
                /* XXX: implicit cast ? */
                if ((pplan->sval->type.t & VT_BTYPE) == VT_LLONG) {
                    o(0xE52D0004 | (intr(r) << 12)); /* push r */
                    o(0xE52D0004 | (intr(r) << 12)); /* push r */
                if (i == STACK_CLASS && pplan->prev)
                    gadd_sp(pplan->prev->end - pplan->start); /* Add padding if any */
                gv(regmask(TREG_F0 + (pplan->start >> 1)));
                if (pplan->start & 1) { /* Must be in upper part of double register */
                    o(0xEEF00A40 | ((pplan->start >> 1) << 12) | (pplan->start >> 1)); /* vmov.f32 s(n+1), sn */
                    vtop->r = VT_CONST; /* avoid being saved on stack by gv for next float */
                if ((pplan->sval->type.t & VT_BTYPE) == VT_LLONG) {
                    gv(regmask(pplan->end));
                    pplan->sval->r2 = vtop->r;
                gv(regmask(pplan->start));
                /* Mark register as used so that gcall_or_jmp uses another one
                   (regs >= 4 are free as never used to pass parameters) */
                pplan->sval->r = vtop->r;
    /* second pass to restore registers that were saved on stack by accident.
       Maybe redundant after the "lvalue_save" patch in tccgen.c:gv() */
    /* Manually free remaining registers since next parameters are loaded
     * manually, without the help of gv(int). */
        o(0xE8BD0000 | todo); /* pop {todo} */
        for (pplan = plan->clsplans[CORE_STRUCT_CLASS]; pplan; pplan = pplan->prev) {
                pplan->sval->r = pplan->start;
                /* An SValue can only pin 2 registers at best (r and r2) but a structure
                   can occupy more than 2 registers. Thus, we need to push on the value
                   stack some fake parameter to have one SValue for each register used
                   by a structure (r2 is not used). */
                for (r = pplan->start + 1; r <= pplan->end; r++) {
                    if (todo & (1 << r)) {
    return nb_extra_sval;
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
    int def_float_abi = float_abi;
    if (float_abi == ARM_HARD_FLOAT) {
        variadic = (vtop[-nb_args].type.ref->f.func_type == FUNC_ELLIPSIS);
        if (variadic || floats_in_core_regs(&vtop[-nb_args]))
            float_abi = ARM_SOFTFP_FLOAT;
    /* Values must not be left in the CPU flags once other instructions are
       generated. Also avoid leaving VT_JMP anywhere except on the top of the
       stack because it would complicate the code generator. */
    r = vtop->r & VT_VALMASK;
    if (r == VT_CMP || (r & ~1) == VT_JMP)
    args_size = assign_regs(nb_args, float_abi, &plan, &todo);
    if (args_size & 7) { /* Stack must be 8 byte aligned at fct call for EABI */
        args_size = (args_size + 7) & ~7;
        o(0xE24DD004); /* sub sp, sp, #4 */
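        /* For example, three word-sized stack arguments give args_size = 12,
           which is rounded up to 16 and padded with one extra word here so
           that sp is 8-byte aligned at the call, as the EABI requires. */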
    nb_args += copy_params(nb_args, &plan, todo);
    tcc_free(plan.pplans);
    /* Move fct SValue on top as required by gcall_or_jmp */
    gadd_sp(args_size); /* pop all parameters passed on the stack */
#if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
    if (float_abi == ARM_SOFTFP_FLOAT && is_float(vtop->type.ref->type.t)) {
        if ((vtop->type.ref->type.t & VT_BTYPE) == VT_FLOAT) {
            o(0xEE000A10); /* vmov s0, r0 */
            o(0xEE000B10); /* vmov.32 d0[0], r0 */
            o(0xEE201B10); /* vmov.32 d0[1], r1 */
    vtop -= nb_args + 1; /* Pop all params and fct address from value stack */
    leaffunc = 0; /* we are calling a function, so we aren't in a leaf function */
    float_abi = def_float_abi;
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int n, nf, size, align, rs, struct_ret = 0;
    int addr, pn, sn; /* pn=core, sn=stack */
    struct avail_regs avregs = AVAIL_REGS_INITIALIZER;
    sym = func_type->ref;
    func_vt = sym->type;
    func_var = (func_type->ref->f.func_type == FUNC_ELLIPSIS);
    if ((func_vt.t & VT_BTYPE) == VT_STRUCT &&
        !gfunc_sret(&func_vt, func_var, &ret_type, &align, &rs))
        func_vc = 12; /* Offset from fp of the place to store the result */
    for (sym2 = sym->next; sym2 && (n < 4 || nf < 16); sym2 = sym2->next) {
        size = type_size(&sym2->type, &align);
        if (float_abi == ARM_HARD_FLOAT && !func_var &&
            (is_float(sym2->type.t) || is_hgen_float_aggr(&sym2->type))) {
            int tmpnf = assign_vfpreg(&avregs, align, size);
            tmpnf += (size + 3) / 4;
            nf = (tmpnf > nf) ? tmpnf : nf;
            n += (size + 3) / 4;
    o(0xE1A0C00D); /* mov ip,sp */
        o(0xE92D0000 | ((1 << n) - 1)); /* save r0-r4 on stack if needed */
        nf = (nf + 1) & -2; /* nf => HARDFLOAT => EABI */
        o(0xED2D0A00 | nf); /* save s0-s15 on stack if needed */
    o(0xE92D5800); /* save fp, ip, lr */
    o(0xE1A0B00D); /* mov fp, sp */
    func_sub_sp_offset = ind;
    o(0xE1A00000); /* nop, leave space for stack adjustment in epilog */
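    /* The nop recorded at func_sub_sp_offset is a placeholder: the frame size
       is not known yet, so gfunc_epilog() later patches this word with the
       actual "sub sp, fp, #..." allocating the locals, or with a call to a
       small stub when the constant cannot be encoded as an immediate. */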
    if (float_abi == ARM_HARD_FLOAT) {
        avregs = AVAIL_REGS_INITIALIZER;
    pn = struct_ret, sn = 0;
    while ((sym = sym->next)) {
        size = type_size(type, &align);
        size = (size + 3) >> 2;
        align = (align + 3) & ~3;
        if (float_abi == ARM_HARD_FLOAT && !func_var && (is_float(sym->type.t)
            || is_hgen_float_aggr(&sym->type))) {
            int fpn = assign_vfpreg(&avregs, align, size << 2);
            pn = (pn + (align-1)/4) & -(align/4);
            addr = (nf + pn) * 4;
            sn = (sn + (align-1)/4) & -(align/4);
            addr = (n + nf + sn) * 4;
        sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | lvalue_type(type->t),
/* generate function epilog */
void gfunc_epilog(void)
    /* Copy float return value to core register if base standard is used and
       float computation is made with VFP */
#if defined(TCC_ARM_EABI) && defined(TCC_ARM_VFP)
    if ((float_abi == ARM_SOFTFP_FLOAT || func_var) && is_float(func_vt.t)) {
        if ((func_vt.t & VT_BTYPE) == VT_FLOAT)
            o(0xEE100A10); /* fmrs r0, s0 */
            o(0xEE100B10); /* fmrdl r0, d0 */
            o(0xEE301B10); /* fmrdh r1, d0 */
    o(0xE89BA800); /* restore fp, sp, pc */
    diff = (-loc + 3) & -4;
        diff = ((diff + 11) & -8) - 4;
        x = stuff_const(0xE24BD000, diff); /* sub sp,fp,# */
            *(uint32_t *)(cur_text_section->data + func_sub_sp_offset) = x;
            o(0xE59FC004); /* ldr ip,[pc+4] */
            o(0xE04BD00C); /* sub sp,fp,ip */
            o(0xE1A0F00E); /* mov pc,lr */
            *(uint32_t *)(cur_text_section->data + func_sub_sp_offset) = 0xE1000000 | encbranch(func_sub_sp_offset, addr, 1);
ST_FUNC void gen_fill_nops(int bytes)
        tcc_error("alignment of code section not multiple of 4");
/* generate a jump to a label */
    o(0xE0000000 | encbranch(r, t, 1));
/* generate a jump to a fixed address */
void gjmp_addr(int a)
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
    v = vtop->r & VT_VALMASK;
    if (nocode_wanted) {
    } else if (v == VT_CMP) {
        op = mapcc(inv ? negcc(vtop->c.i) : vtop->c.i);
        op |= encbranch(r, t, 1);
    } else if (v == VT_JMP || v == VT_JMPI) {
        if ((v & 1) == inv) {
            p = decbranch(lp = p);
                x = (uint32_t *)(cur_text_section->data + lp);
                *x |= encbranch(lp, t, 1);
/* generate an integer binary operation */
void gen_opi(int op)
    uint32_t opc = 0, r, fr;
    unsigned short retreg = REG_IRET;
    case TOK_ADDC1: /* add with carry generation */
    case TOK_SUBC1: /* sub with carry generation */
    case TOK_ADDC2: /* add with carry use */
    case TOK_SUBC2: /* sub with carry use */
        gv2(RC_INT, RC_INT);
        o(0xE0000090 | (intr(r) << 16) | (intr(r) << 8) | intr(fr));
        func = TOK___aeabi_idivmod;
        func = TOK___aeabi_uidivmod;
        gv2(RC_INT, RC_INT);
        r = intr(vtop[-1].r2 = get_reg(RC_INT));
        vtop[-1].r = get_reg_ex(RC_INT, regmask(c));
        o(0xE0800090 | (r << 16) | (intr(vtop->r) << 12) | (intr(c) << 8) | intr(vtop[1].r));
        if ((vtop[-1].r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            if (opc == 4 || opc == 5 || opc == 0xc) {
                opc |= 2; // sub -> rsb
        if ((vtop->r & VT_VALMASK) == VT_CMP ||
            (vtop->r & (VT_VALMASK & ~1)) == VT_JMP)
        opc = 0xE0000000 | (opc << 20) | (c << 16);
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            x = stuff_const(opc | 0x2000000, vtop->c.i);
                r = intr(vtop[-1].r = get_reg_ex(RC_INT, regmask(vtop[-1].r)));
        fr = intr(gv(RC_INT));
        r = intr(vtop[-1].r = get_reg_ex(RC_INT, two2mask(vtop->r, vtop[-1].r)));
        if (op >= TOK_ULT && op <= TOK_GT) {
        opc = 0xE1A00000 | (opc << 5);
        if ((vtop->r & VT_VALMASK) == VT_CMP ||
            (vtop->r & (VT_VALMASK & ~1)) == VT_JMP)
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            fr = intr(vtop[-1].r = get_reg_ex(RC_INT, regmask(vtop[-1].r)));
            c = vtop->c.i & 0x1f;
            o(opc | (c << 7) | (fr << 12));
            fr = intr(gv(RC_INT));
            c = intr(vtop[-1].r = get_reg_ex(RC_INT, two2mask(vtop->r, vtop[-1].r)));
            o(opc | (c << 12) | (fr << 8) | 0x10);
        vpush_global_sym(&func_old_type, func);
        tcc_error("gen_opi %i unimplemented!", op);
static int is_zero(int i)
    if ((vtop[i].r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
    if (vtop[i].type.t == VT_FLOAT)
        return (vtop[i].c.f == 0.f);
    else if (vtop[i].type.t == VT_DOUBLE)
        return (vtop[i].c.d == 0.0);
    return (vtop[i].c.ld == 0.l);
/* generate a floating point operation 'v = t1 op t2' instruction. The
 * two operands are guaranteed to have the same floating point type */
void gen_opf(int op)
    x = 0xEE000A00 | T2CPR(vtop->type.t);
            x |= 0x810000; /* fsubX -> fnegX */
        if (op < TOK_ULT || op > TOK_GT) {
            tcc_error("unknown fp op %x!", op);
            case TOK_LT: op = TOK_GT; break;
            case TOK_GE: op = TOK_ULE; break;
            case TOK_LE: op = TOK_GE; break;
            case TOK_GT: op = TOK_ULT; break;
        x |= 0xB40040; /* fcmpX */
        if (op != TOK_EQ && op != TOK_NE)
            x |= 0x80; /* fcmpX -> fcmpeX */
            o(x | 0x10000 | (vfpr(gv(RC_FLOAT)) << 12)); /* fcmp(e)X -> fcmp(e)zX */
            x |= vfpr(gv(RC_FLOAT));
            o(x | (vfpr(gv(RC_FLOAT)) << 12));
        o(0xEEF1FA10); /* fmstat */
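        /* fmstat (vmrs APSR_nzcv, fpscr) copies the VFP comparison flags into
           the core CPSR flags, so the result can be consumed by the regular
           VT_CMP / conditional branch machinery. */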
        case TOK_LE: op = TOK_ULE; break;
        case TOK_LT: op = TOK_ULT; break;
        case TOK_UGE: op = TOK_GE; break;
        case TOK_UGT: op = TOK_GT; break;
        vtop->r = get_reg_ex(RC_FLOAT, r);
    o(x | (vfpr(vtop->r) << 12));
static uint32_t is_fconst()
    if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
    if (vtop->type.t == VT_FLOAT)
    else if (vtop->type.t == VT_DOUBLE)
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
void gen_opf(int op)
    uint32_t x, r, r2, c1, c2;
    //fputs("gen_opf\n",stderr);
#if LDOUBLE_SIZE == 8
    if ((vtop->type.t & VT_BTYPE) != VT_FLOAT)
    if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
    else if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE)
            r = fpr(gv(RC_FLOAT));
            r2 = fpr(gv(RC_FLOAT));
            r = fpr(gv(RC_FLOAT));
        } else if (c1 && c1 <= 0xf) {
            r = fpr(gv(RC_FLOAT));
            r = fpr(gv(RC_FLOAT));
            r2 = fpr(gv(RC_FLOAT));
            r = fpr(gv(RC_FLOAT));
            r2 = fpr(gv(RC_FLOAT));
            r = fpr(gv(RC_FLOAT));
        } else if (c1 && c1 <= 0xf) {
            r = fpr(gv(RC_FLOAT));
            r = fpr(gv(RC_FLOAT));
            r2 = fpr(gv(RC_FLOAT));
        if (op >= TOK_ULT && op <= TOK_GT) {
            x |= 0xd0f110; // cmfe
            /* bug (intention?) in Linux FPU emulator
               doesn't set carry if equal */
                tcc_error("unsigned comparison on floats?");
                op = TOK_ULE; /* correct in unordered case only if AC bit in FPSR set */
                x &= ~0x400000; // cmfe -> cmf
            r = fpr(gv(RC_FLOAT));
                r2 = fpr(gv(RC_FLOAT));
            vtop[-1].r = VT_CMP;
            tcc_error("unknown fp op %x!", op);
    if (vtop[-1].r == VT_CMP)
        vtop[-1].r = get_reg_ex(RC_FLOAT, two2mask(vtop[-1].r, c1));
        o(x | (r << 16) | (c1 << 12) | r2);
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
ST_FUNC void gen_cvt_itof1(int t)
    bt = vtop->type.t & VT_BTYPE;
    if (bt == VT_INT || bt == VT_SHORT || bt == VT_BYTE) {
        r2 = vfpr(vtop->r = get_reg(RC_FLOAT));
        o(0xEE000A10 | (r << 12) | (r2 << 16)); /* fmsr */
        if (!(vtop->type.t & VT_UNSIGNED))
            r2 |= 0x80; /* fuitoX -> fsituX */
        o(0xEEB80A40 | r2 | T2CPR(t)); /* fYitoX */
        r2 = fpr(vtop->r = get_reg(RC_FLOAT));
        if ((t & VT_BTYPE) != VT_FLOAT)
            dsize = 0x80; /* flts -> fltd */
        o(0xEE000110 | dsize | (r2 << 16) | (r << 12)); /* flts */
        if ((vtop->type.t & (VT_UNSIGNED|VT_BTYPE)) == (VT_UNSIGNED|VT_INT)) {
            o(0xE3500000 | (r << 12)); /* cmp */
            r = fpr(get_reg(RC_FLOAT));
            if (last_itod_magic) {
                off = ind + 8 - last_itod_magic;
            o(0xBD1F0100 | (r << 12) | off); /* ldflts */
                o(0xEA000000); /* b */
                last_itod_magic = ind;
                o(0x4F800000); /* 4294967296.0f */
            o(0xBE000100 | dsize | (r2 << 16) | (r2 << 12) | r); /* adflt */
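            /* On FPA, an unsigned int with the top bit set first converts as a
               negative signed value; the code above then conditionally adds the
               constant 4294967296.0f (2^32, IEEE-754 single 0x4F800000) to
               correct the result. */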
    } else if (bt == VT_LLONG) {
        CType *func_type = 0;
        if ((t & VT_BTYPE) == VT_FLOAT) {
            func_type = &func_float_type;
            if (vtop->type.t & VT_UNSIGNED)
                func = TOK___floatundisf;
                func = TOK___floatdisf;
#if LDOUBLE_SIZE != 8
        } else if ((t & VT_BTYPE) == VT_LDOUBLE) {
            func_type = &func_ldouble_type;
            if (vtop->type.t & VT_UNSIGNED)
                func = TOK___floatundixf;
                func = TOK___floatdixf;
        } else if ((t & VT_BTYPE) == VT_DOUBLE) {
        } else if ((t & VT_BTYPE) == VT_DOUBLE || (t & VT_BTYPE) == VT_LDOUBLE) {
            func_type = &func_double_type;
            if (vtop->type.t & VT_UNSIGNED)
                func = TOK___floatundidf;
                func = TOK___floatdidf;
        vpush_global_sym(func_type, func);
    tcc_error("unimplemented gen_cvt_itof %x!", vtop->type.t);
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
    r2 = vtop->type.t & VT_BTYPE;
        r = vfpr(gv(RC_FLOAT));
        o(0xEEBC0AC0 | (r << 12) | r | T2CPR(r2) | u); /* ftoXizY */
        r2 = intr(vtop->r = get_reg(RC_INT));
        o(0xEE100A10 | (r << 16) | (r2 << 12));
            func = TOK___fixunssfsi;
#if LDOUBLE_SIZE != 8
        else if (r2 == VT_LDOUBLE)
            func = TOK___fixunsxfsi;
        else if (r2 == VT_DOUBLE)
        else if (r2 == VT_LDOUBLE || r2 == VT_DOUBLE)
            func = TOK___fixunsdfsi;
        r = fpr(gv(RC_FLOAT));
        r2 = intr(vtop->r = get_reg(RC_INT));
        o(0xEE100170 | (r2 << 12) | r);
    } else if (t == VT_LLONG) { // unsigned handled in gen_cvt_ftoi1
#if LDOUBLE_SIZE != 8
        else if (r2 == VT_LDOUBLE)
        else if (r2 == VT_DOUBLE)
        else if (r2 == VT_LDOUBLE || r2 == VT_DOUBLE)
        vpush_global_sym(&func_old_type, func);
        vtop->r2 = REG_LRET;
    tcc_error("unimplemented gen_cvt_ftoi!");
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
    if (((vtop->type.t & VT_BTYPE) == VT_FLOAT) != ((t & VT_BTYPE) == VT_FLOAT)) {
        uint32_t r = vfpr(gv(RC_FLOAT));
        o(0xEEB70AC0 | (r << 12) | r | T2CPR(vtop->type.t));
    /* all we have to do on i386 and FPA ARM is to put the float in a register */
/* computed goto support */
/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    v.r = VT_LOCAL | VT_LVAL;
/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    v.r = VT_LOCAL | VT_LVAL;
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
    int r = intr(gv(RC_INT));
    o(0xE04D0000 | (r << 12) | r); /* sub r, sp, r */
    if (align & (align - 1))
        tcc_error("alignment is not a power of 2: %i", align);
    o(stuff_const(0xE3C0D000 | (r << 16), align - 1)); /* bic sp, r, #align-1 */
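    /* For example, with align == 8 this emits "bic sp, r, #7", rounding the
       new stack pointer down to an 8-byte boundary after the subtraction
       above. */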
/* end of ARM code generator */
/*************************************************************/
/*************************************************************/