/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS 25
#define NB_ASM_REGS 16
#define CONFIG_TCC_ASM
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_ST0     0x0080 /* only for long double */
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_IRE2    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_FRE2    RC_XMM1 /* function return: second float register */

/* pretty names for the registers */
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX  /* single word int return register */
#define REG_IRE2 TREG_RDX  /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_FRE2 TREG_XMM1 /* second float return register */
/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE 16
#define LDOUBLE_ALIGN 16

/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN 16

/* define if return values need to be extended explicitly
   at caller side (for interfacing with non-TCC compilers) */
#define PROMOTE_RET
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#define USING_GLOBALS
#include "tcc.h"

ST_DATA const char *target_machine_defs =
    "__x86_64__\0"
    "__amd64__\0"
    ;
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    /* xmm6 */ RC_XMM6,
    /* xmm7 */ RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

#if defined(CONFIG_TCC_BCHECK)
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
ST_DATA int func_bound_add_epilog;
#endif

static int func_scratch, func_alloca;
/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    if (nocode_wanted)
        return;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
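/* Byte-order sketch (illustrative, not compiled): o() emits its argument
   low byte first and stops at the first zero byte, which is why multi-byte
   opcodes appear "reversed" as integer literals throughout this file, and
   why gen_le32()/gen_le64() exist for data that may legitimately contain
   zero bytes. */
#if 0
o(0xb60f);            /* emits 0x0f 0xb6 (the movzbl opcode bytes) */
o(0xc0d9);            /* emits 0xd9 0xc0 (fld %st(0)) */
gen_le32(0x00000005); /* emits 05 00 00 00, zero bytes included */
#endif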
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
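/* REX sketch, assuming the usual TREG_* numbering (TREG_RAX == 0,
   TREG_R8 == 8): */
#if 0
orex(1, TREG_R8, 0, 0x8b);  /* 0x49 0x8b: REX.W+REX.B, 64-bit mov with
                               an extended (r8-r15) base register */
orex(0, TREG_RAX, 0, 0x8b); /* 0x8b only: a 32-bit op on a low register
                               needs no REX prefix at all */
#endif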
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a < 0 ? -a : a - t - 4);
        t = n;
    }
}
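/* Sketch of the forward-jump chain this patches (inferred from the
   read32le/write32le pattern above): each unresolved jump's 4-byte
   displacement slot stores the offset of the previous unresolved jump,
   so pending jumps form a linked list threaded through the code itself. */
#if 0
int t = 0;
t = gjmp2(0xe9, t); /* first jmp; its slot holds 0 (end of list) */
t = gjmp2(0xe9, t); /* second jmp; its slot holds the first one's offset */
gsym_addr(t, ind);  /* walk the list and resolve both to jump here */
#endif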
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}
/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)
{
    int t;
    if (nocode_wanted)
        return s;
    o(c);
    t = ind;
    gen_le32(s);
    return t;
}

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
    gen_le32(c);
}
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifdef TCC_TARGET_PE
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
#endif
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        if (!(r & VT_SYM)) {
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
        } else {
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
            if (is_got) {
                gen_gotpcrel(r, sym, c);
            } else {
                gen_addrpc32(r, sym, c);
            }
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
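/* ModRM sketch, assuming TREG_RAX == 0: locals are addressed off %rbp,
   so small offsets take the disp8 form and large ones the disp32 form. */
#if 0
gen_modrm(TREG_RAX, VT_LOCAL, NULL, -8);   /* 0x45 0xf8: -0x8(%rbp), disp8 */
gen_modrm(TREG_RAX, VT_LOCAL, NULL, -300); /* 0x85 + 32-bit displacement */
#endif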
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            v1.sym = NULL;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        } else if (fc != sv->c.i) {
            /* If the addend doesn't fit into a 32bit signed
               we must use a 64bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            v1.r = VT_CONST;
            v1.c.i = sv->c.i;
            v1.sym = NULL;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
            fc = 0;
        }
        ll = 0;
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            int align;
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                default:
                    tcc_error("invalid aggregate type for register load");
                    break;
            }
        }
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else if ((ft & VT_TYPE) == (VT_VOID)) {
            /* Can happen with zero size structs */
            return;
        } else {
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
                   );
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            if (fc & 0x100) {
                v = vtop->cmp_r;
                fc &= ~0x100;
                /* This was a float compare. If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
                g(v ^ fc ^ (v == TOK_NE));
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
            orex(0,r,0, 0x0f);
            o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(is64_type(ft), r, v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
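/* Usage sketch (hypothetical SValue; setup abbreviated): loading a 32-bit
   int local at -4(%rbp) into %eax goes through the VT_LVAL path above with
   b == 0x8b and emits 8b 45 fc, i.e. "mov -0x4(%rbp),%eax". */
#if 0
SValue sv;
memset(&sv, 0, sizeof sv);
sv.type.t = VT_INT;
sv.r = VT_LOCAL | VT_LVAL;
sv.c.i = -4;
load(TREG_RAX, &sv); /* emits: 8b 45 fc */
#endif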
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    fr = v->r & VT_VALMASK;
    ft = v->type.t;
    fc = v->c.i;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }

    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            orex(1, fr, r, op64);
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
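/* Counterpart sketch to the load() example: storing %eax back into the
   same local hits the plain 0x89 path and emits 89 45 fc, i.e.
   "mov %eax,-0x4(%rbp)". */
#if 0
SValue dst;
memset(&dst, 0, sizeof dst);
dst.type.t = VT_INT;
dst.r = VT_LOCAL | VT_LVAL;
dst.c.i = -4;
store(TREG_RAX, &dst); /* emits: 89 45 fc */
#endif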
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
#ifdef TCC_TARGET_PE
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
        greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
#if defined(CONFIG_TCC_BCHECK)

static void gen_bounds_call(int v)
{
    Sym *sym = external_helper_sym(v);
    oad(0xe8, 0);
#ifdef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
#else
    greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);
#endif
}
#ifdef TCC_TARGET_PE
# define TREG_FASTCALL_1 TREG_RCX
#else
# define TREG_FASTCALL_1 TREG_RDI
#endif
static void gen_bounds_prolog(void)
{
    /* leave some room for bound checking code */
    func_bound_offset = lbounds_section->data_offset;
    func_bound_ind = ind;
    func_bound_add_epilog = 0;
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lbound section pointer */
    gen_le32 (0);
    oad(0xb8, 0); /* call to function */
}
static void gen_bounds_epilog(void)
{
    addr_t saved_ind;
    addr_t *bounds_ptr;
    Sym *sym_data;
    int offset_modified = func_bound_offset != lbounds_section->data_offset;

    if (!offset_modified && !func_bound_add_epilog)
        return;

    /* add end of table info */
    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
    *bounds_ptr = 0;

    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                           func_bound_offset, lbounds_section->data_offset);

    /* generate bound local allocation */
    if (offset_modified) {
        saved_ind = ind;
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
        ind = ind + 7;
        gen_bounds_call(TOK___bound_local_new);
        ind = saved_ind;
    }

    /* generate bound check local freeing */
    o(0x5250); /* save returned value, if any */
    greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lea xxx(%rip), %rcx/rdi */
    gen_le32 (0);
    gen_bounds_call(TOK___bound_local_delete);
    o(0x585a); /* restore returned value, if any */
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}
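/* Mapping sketch, assuming TREG_R10 == 10 and TREG_R11 == 11: the first
   two integer args are staged in scratch registers and only copied into
   their ABI homes (%rcx/%rdx) right before the call, so gv() cannot
   clobber them while later arguments are being evaluated. */
#if 0
arg_prepare_reg(0); /* 10 (%r10), copied to %rcx just before the call */
arg_prepare_reg(1); /* 11 (%r11), copied to %rdx just before the call */
arg_prepare_reg(2); /* TREG_R8, used directly */
#endif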
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
static int using_regs(int size)
{
    return !(size > 8 || (size & (size - 1)));
}
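/* Behavior sketch: nonzero exactly for sizes 1, 2, 4 and 8, the only
   aggregate sizes Win64 passes and returns in a register. */
#if 0
using_regs(4);  /* 1: power of two, fits a register */
using_regs(8);  /* 1 */
using_regs(3);  /* 0: not a power of two */
using_regs(16); /* 0: wider than 8 bytes */
#endif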
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (!using_regs(size))
        return 0;
    if (size == 8)
        ret->t = VT_LLONG;
    else if (size == 4)
        ret->t = VT_INT;
    else if (size == 2)
        ret->t = VT_SHORT;
    else
        ret->t = VT_BYTE;
    ret->ref = NULL;
    return 1;
}
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}
static int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                if (arg >= REGN) {
                    gv(RC_XMM0);
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* Load directly to xmmN register */
                    gv(RC_XMM0 << arg);
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + arg*8 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);
    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }
    gcall_or_jmp(0);

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            gen_bounds_call(TOK___bound_alloca_nr); /* new region */
#endif
    }
    vtop--;
}
#define FUNC_PROLOG_SIZE 11
/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 32;
    func_alloca = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | VT_LVAL, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_var) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    loc = (loc & -16) - func_scratch;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    v = -loc;

    if (v >= 4096) {
        Sym *sym = external_helper_sym(TOK___chkstk);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    /* add the "func_scratch" area after each alloca seen */
    gsym_addr(func_alloca, -func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
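/* Encoding sketch: adjustments that fit a signed byte use the imm8 form,
   everything else the imm32 form. */
#if 0
gadd_sp(8);   /* 48 83 c4 08:          add $0x8,%rsp  (imm8)  */
gadd_sp(200); /* 48 81 c4 c8 00 00 00: add $0xc8,%rsp (imm32) */
#endif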
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
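/* Merge sketch for the rules above: */
#if 0
classify_x86_64_merge(x86_64_mode_sse, x86_64_mode_sse);     /* sse: e.g. {double,double} */
classify_x86_64_merge(x86_64_mode_integer, x86_64_mode_sse); /* integer wins the merge */
classify_x86_64_merge(x86_64_mode_x87, x86_64_mode_sse);     /* memory: x87 never merges */
#endif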
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
        return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;
        *reg_count = 0; /* avoid compiler warning */

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    if (size > 4)
                        ret_t = VT_LLONG;
                    else if (size > 2)
                        ret_t = VT_INT;
                    else if (size > 1)
                        ret_t = VT_SHORT;
                    else
                        ret_t = VT_BYTE;
                    if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
                        ret_t |= VT_UNSIGNED;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none*/
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
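/* Classification sketch (hypothetical "ty" describing struct { double x, y; }):
   a 16-byte all-SSE struct comes back as x86_64_mode_sse with reg_count == 2,
   i.e. passed in %xmm0/%xmm1, while anything larger than 16 bytes falls back
   to x86_64_mode_memory. */
#if 0
int size, align, reg_count;
X86_64_Mode m = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
/* m == x86_64_mode_sse, size == 16, reg_count == 2 */
#endif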
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
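/* Mapping sketch, assuming TREG_R10 == 10 and TREG_R11 == 11: only arg
   slots 2 and 3 need staging, because gv() may itself clobber %rdx and
   %rcx; they are copied to their ABI homes just before the call (see the
   0xd2894c/0xd9894c movs in gfunc_call below). */
#if 0
arg_prepare_reg(0); /* TREG_RDI, used directly */
arg_prepare_reg(2); /* 10 (%r10), later copied to %rdx */
arg_prepare_reg(3); /* 11 (%r11), later copied to %rcx */
#endif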
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, i, reg_count, k;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char *onstack = tcc_malloc((nb_args + 1) * sizeof (char));

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);
#endif

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    stack_adjust = 0;
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (size == 0) continue;
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
            onstack[i] = 0;
        } else if (mode == x86_64_mode_none) {
            onstack[i] = 0;
        } else {
            if (align == 16 && (stack_adjust &= 15)) {
                onstack[i] = 2;
                stack_adjust = 0;
            } else
                onstack[i] = 1;
            stack_adjust += size;
        }
    }

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)
        gv(RC_INT);

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    args_size = 0;
    stack_adjust &= 15;
    for (i = k = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (size) {
            if (!onstack[i + k]) {
                ++i;
                continue;
            }
            /* Possibly adjust stack to align SSE boundary. We're processing
               args from right to left while allocating happens left to right
               (stack grows down), so the adjustment needs to happen _after_
               an argument that requires it. */
            if (stack_adjust) {
                o(0x50); /* push %rax; aka sub $8,%rsp */
                args_size += 8;
                stack_adjust = 0;
            }
            if (onstack[i + k] == 2)
                stack_adjust = 1;
        }

        vrotb(i+1);

        switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                break;

            case VT_LDOUBLE:
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                r = gv(RC_FLOAT);
                o(0x50); /* push $rax */
                /* movq %xmmN, (%rsp) */
                o(0xd60f66);
                o(0x04 + REG_VALUE(r)*8);
                o(0x24);
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                r = gv(RC_INT);
                orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                break;
        }
        args_size += size;

        vpop();
        --nb_args;
        k++;
    }

    tcc_free(onstack);

    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        if (size == 0) continue;
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11
static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
{
    CType *func_type = &func_sym->type;
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_var) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                    goto stack_arg;
                seen_reg_num += reg_count;
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                    goto stack_arg;
                seen_sse_num += reg_count;
                break;
            }
        }

        loc -= 24;
        /* movl $0x????????, -0x18(%rbp) */
        o(0xe8c745);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0x14(%rbp) */
        o(0xecc745);
        gen_le32(seen_sse_num * 16 + 48);
        /* leaq $0x????????, %r11 */
        o(0x9d8d4c);
        gen_le32(seen_stack_size);
        /* movq %r11, -0x10(%rbp) */
        o(0xf05d894c);
        /* leaq $-192(%rbp), %r11 */
        o(0x9d8d4c);
        gen_le32(-176 - 24);
        /* movq %r11, -0x8(%rbp) */
        o(0xf85d894c);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            }
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
ST_FUNC void gen_fill_nops(int bytes)
{
    while (bytes--)
        g(0x90);
}
/* generate a jump to a label */
ST_FUNC int gjmp(int t)
{
    return gjmp2(0xe9, t);
}
/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
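/* Encoding sketch: backward jumps within reach of a signed byte use the
   2-byte short form, anything farther the 5-byte near form. */
#if 0
gjmp_addr(ind - 10);   /* eb f4:          jmp short */
gjmp_addr(ind - 1000); /* e9 13 fc ff ff: jmp near  */
#endif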
ST_FUNC int gjmp_append(int n, int t)
{
    void *p;
    /* insert vtop->c jump list in t */
    if (n) {
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
            n1 = n2;
        write32le(p, t);
        t = n;
    }
    return t;
}
ST_FUNC int gjmp_cond(int op, int t)
{
    if (op & 0x100) {
        /* This was a float compare. If the parity flag is set
           the result was unordered. For anything except != this
           means false and we don't jump (anding both conditions).
           For != this means true (oring both).
           Take care about inverting the test. We need to jump
           to our target if the result was unordered and test wasn't NE,
           otherwise if unordered we don't want to jump. */
        int v = vtop->cmp_r;
        op &= ~0x100;
        if (op ^ v ^ (v != TOK_NE))
            o(0x067a); /* jp +6 */
        else {
            g(0x0f);
            t = gjmp2(0x8a, t); /* jp t */
        }
    }
    g(0x0f);
    t = gjmp2(op - 16, t);
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT)
            vset_VT_CMP(op);
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
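/* Emission sketch for the constant '+' path above (opc == 0), assuming
   the value sits in %rax: a 64-bit "add $5,%rax" takes the imm8 form. */
#if 0
orex(1, TREG_RAX, 0, 0x83);               /* 48 83 */
o(0xc0 | (0 << 3) | REG_VALUE(TREG_RAX)); /* c0    */
g(5);                                     /* 05    */
#endif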
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vset_VT_CMP(op);
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                v1.sym = NULL;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vset_VT_CMP(op | 0x100);
            vtop->cmp_r = op;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                v1.sym = NULL;
                load(r, &v1);
                fc = 0;
                vtop->r = r = r | VT_LVAL;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
{
    int r = gv(RC_INT);
    /* x86_64 specific: movslq */
    o(0x6348);
    o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
}
/* char/short to int conversion */
ST_FUNC void gen_cvt_csti(int t)
{
    int r, sz, xl, ll;
    r = gv(RC_INT);
    sz = !(t & VT_UNSIGNED);
    xl = (t & VT_BTYPE) == VT_SHORT;
    ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
        | (sz << 3 | xl) << 8
        | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16
        );
}
/* computed goto support */
ST_FUNC void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}
/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}
#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);
}
#endif
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
    int use_call = 0;

#if defined(CONFIG_TCC_BCHECK)
    use_call = tcc_state->do_bounds_check;
#endif
#ifdef TCC_TARGET_PE /* alloca does more than just adjust %rsp on Windows */
    use_call = 1;
#endif
    if (use_call)
    {
        vpush_helper_func(TOK_alloca);
        vswap(); /* Move alloca ref past allocation size */
        gfunc_call(1);
    }
    else {
        int r;
        r = gv(RC_INT); /* allocation size */
        /* sub r,%rsp */
        o(0x2b48);
        o(0xe0 | REG_VALUE(r));
        /* We align to 16 bytes rather than align */
        /* and ~15, %rsp */
        o(0xf0e48348);
        vpop();
    }
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/