/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_ASM_REGS 16
#define CONFIG_TCC_ASM
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_ST0     0x0080 /* only for long double */
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
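/* Illustrative note (not in the original source): machine registers are
   numbered 0..15, so the low 3 bits go into the ModRM/SIB/opcode register
   field and bit 3 selects the REX extension bit.  For example, for r10
   (register number 10) REG_VALUE() yields 2 and REX_BASE() yields 1. */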
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16

/* maximum alignment (for aligned attribute support) */
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
ST_FUNC void g(int c)
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
ST_FUNC void o(unsigned int c)

ST_FUNC void gen_le16(int v)

ST_FUNC void gen_le32(int c)

ST_FUNC void gen_le64(int64_t c)
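/* Note added for clarity (not in the original source, whose bodies are
   elided above): o() emits its argument one byte at a time starting with
   the least significant byte, so multi-byte opcode constants throughout
   this file are written "reversed".  For instance o(0xb60f) emits the
   bytes 0x0f 0xb6, i.e. a movzx opcode. */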
static void orex(int ll, int r, int r2, int b)
    if ((r & VT_VALMASK) >= VT_CONST)
    if ((r2 & VT_VALMASK) >= VT_CONST)
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
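/* Added explanatory note (not in the original source): the byte built above
   is the x86-64 REX prefix 0100WRXB.  Here 'll' supplies the W bit (64-bit
   operand size), REX_BASE(r2) the R bit extending the ModRM reg field, and
   REX_BASE(r) the B bit extending the base/rm field; the X bit is unused and
   the prefix is only emitted when at least one bit is set. */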
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
    unsigned char *ptr = cur_text_section->data + t;
    uint32_t n = read32le(ptr); /* next value */
    write32le(ptr, a - t - 4);
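/* Added note (not in the original source): forward jumps with an unknown
   target are kept as a chain: the 32-bit displacement field of each pending
   instruction holds the offset of the previous pending one, with 0 ending
   the chain.  gsym_addr() walks that chain through 'n' and rewrites each
   field with the real pc-relative displacement 'a - t - 4'. */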
static int is64_type(int t)
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
    greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
    greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
    greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
              get_tok_str(sym->v, NULL), c, r,
              cur_text_section->data[ind-3],
              cur_text_section->data[ind-2],
              cur_text_section->data[ind-1]
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);

    /* we use add c, %xxx for displacement */
    o(0xc0 + REG_VALUE(r));
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        /* Absolute memory reference */
        o(0x04 | op_reg); /* [sib] | destreg */
        oad(0x25, c);     /* disp32 */
        o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
        gen_gotpcrel(r, sym, c);
        gen_addrpc32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        /* short reference */
        oad(0x85 | op_reg, c);
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        g(0x80 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
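/* Added summary (not in the original source): the ModRM byte emitted above
   selects the addressing form: mod=00 with rm=100 plus a SIB byte of 0x25
   gives an absolute disp32, mod=00 with rm=101 gives a RIP-relative disp32
   (used for symbolic and GOT references), 0x85|op_reg is a disp32 off %rbp
   for locals, and the 0x80/0x00 forms address memory through a plain base
   register with or without a displacement.  op_reg occupies bits 3..5. */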
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
    gen_modrm_impl(op_reg, r, sym, c, 0);
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
    int v, t, ft, fc, fr;
    sv = pe_getimport(sv, &v2);
    ft = sv->type.t & ~VT_DEFSIGN;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        /* we cannot use float registers as a temporary register */
        tr = get_reg(RC_INT) | TREG_MEM;
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);
        /* load from the temporary register */
    if (v == VT_LLOCAL) {
        v1.r = VT_LOCAL | VT_LVAL;
        if (!(reg_classes[fr] & (RC_INT|RC_R11)))
            fr = get_reg(RC_INT);
        /* If the addend doesn't fit into a 32-bit signed value
           we must use a 64-bit move.  We've checked above
           that this doesn't have a sym associated. */
        v1.type.t = VT_LLONG;
        if (!(reg_classes[fr] & (RC_INT|RC_R11)))
            fr = get_reg(RC_INT);
    /* Like GCC we can load from small enough properly sized
       structs and unions as well.
       XXX maybe move to generic operand handling, but should
       occur only with asm, so tccasm.c might also be a better place */
    if ((ft & VT_BTYPE) == VT_STRUCT) {
        switch (type_size(&sv->type, &align)) {
            case 1: ft = VT_BYTE; break;
            case 2: ft = VT_SHORT; break;
            case 4: ft = VT_INT; break;
            case 8: ft = VT_LLONG; break;
                tcc_error("invalid aggregate type for register load");
    if ((ft & VT_BTYPE) == VT_FLOAT) {
        r = REG_VALUE(r); /* movd */
    } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
        b = 0x7e0ff3; /* movq */
    } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
        b = 0xdb, r = 5; /* fldt */
    } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
        b = 0xbe0f;   /* movsbl */
    } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
        b = 0xb60f;   /* movzbl */
    } else if ((ft & VT_TYPE) == VT_SHORT) {
        b = 0xbf0f;   /* movswl */
    } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
        b = 0xb70f;   /* movzwl */
    assert(((ft & VT_BTYPE) == VT_INT)
           || ((ft & VT_BTYPE) == VT_LLONG)
           || ((ft & VT_BTYPE) == VT_PTR)
           || ((ft & VT_BTYPE) == VT_FUNC)
        gen_modrm64(b, r, fr, sv->sym, fc);
        gen_modrm(r, fr, sv->sym, fc);
        o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
        gen_addrpc32(fr, sv->sym, fc);
        if (sv->sym->type.t & VT_STATIC) {
            o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
            gen_addrpc32(fr, sv->sym, fc);
            o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
            gen_gotpcrel(r, sv->sym, fc);
    } else if (is64_type(ft)) {
        orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
        orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
    } else if (v == VT_LOCAL) {
        orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
        gen_modrm(r, VT_LOCAL, sv->sym, fc);
    } else if (v == VT_CMP) {
        if ((fc & ~0x100) != TOK_NE)
            oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
        /* This was a float compare.  If the parity bit is
           set the result was unordered, meaning false for everything
           except TOK_NE, and true for TOK_NE. */
        o(0x037a + (REX_BASE(r) << 8));
        orex(0,r,0, 0x0f); /* setxx %br */
        o(0xc0 + REG_VALUE(r));
    } else if (v == VT_JMP || v == VT_JMPI) {
        oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
        o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
        oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
    if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
        /* gen_cvt_ftof(VT_DOUBLE); */
        o(0xf0245cdd); /* fstpl -0x10(%rsp) */
        /* movsd -0x10(%rsp),%xmmN */
        o(0x44 + REG_VALUE(r)*8); /* %xmmN */
        assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            assert((ft & VT_BTYPE) == VT_DOUBLE);
        o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
    } else if (r == TREG_ST0) {
        assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
        /* gen_cvt_ftof(VT_LDOUBLE); */
        /* movsd %xmmN,-0x10(%rsp) */
        o(0x44 + REG_VALUE(r)*8); /* %xmmN */
        o(0xf02444dd); /* fldl -0x10(%rsp) */
        o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
    /* store the REX prefix in this variable when PIC is enabled */
    v = pe_getimport(v, &v2);
    fr = v->r & VT_VALMASK;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x7e0f); /* movd */
    } else if (bt == VT_DOUBLE) {
        o(0xd60f); /* movq */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        if (bt == VT_BYTE || bt == VT_BOOL)
        else if (is64_type(bt))
    /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
    if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
        gen_modrm64(op64, r, v->r, v->sym, fc);
    } else if (fr != r) {
        /* XXX: do we ever really get here? */
        o(0xc0 + fr + r * 8); /* mov r, fr */
    if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
        gen_modrm(r, v->r, v->sym, fc);
    } else if (fr != r) {
        /* XXX: do we ever really get here? */
        o(0xc0 + fr + r * 8); /* mov r, fr */
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
        /* otherwise, indirect call */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
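/* Added note (not in the original source): the direct form relies on 0xe8
   being the call rel32 opcode and 0xe9 (0xe8 + 1) the jmp rel32 opcode, so
   'is_jmp' simply bumps the opcode.  Likewise in the indirect form the ModRM
   byte 0xd0 selects "call *reg" (0xff /2) and 0xe0 (0xd0 + (1 << 4)) selects
   "jmp *reg" (0xff /4). */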
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;

static void gen_static_call(int v)
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
    /* save all temporary registers */
    /* prepare fast x86_64 function call */
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size
    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr
    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);
    /* returned pointer is in rax */
    vtop->r = TREG_RAX | VT_BOUNDED;
    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
        else if (vtop->r & VT_LVAL_SHORT)
    size = type_size(&vtop->type, &align);
        case 1: func = TOK___bound_ptr_indir1; break;
        case 2: func = TOK___bound_ptr_indir2; break;
        case 4: func = TOK___bound_ptr_indir4; break;
        case 8: func = TOK___bound_ptr_indir8; break;
        case 12: func = TOK___bound_ptr_indir12; break;
        case 16: func = TOK___bound_ptr_indir16; break;
            tcc_error("unhandled size when dereferencing bounded pointer");
    sym = external_global_sym(func, &func_old_type, 0);
    put_extern_sym(sym, NULL, 0, 0);
    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
    return arg_regs[idx];

static int func_scratch, func_alloca;
755 all the parameters in call order. This functions pops all the
756 parameters and the function address. */
758 static void gen_offs_sp(int b
, int r
, int d
)
760 orex(1,0,r
& 0x100 ? 0 : r
, b
);
762 o(0x2444 | (REG_VALUE(r
) << 3));
765 o(0x2484 | (REG_VALUE(r
) << 3));
770 static int using_regs(int size
)
772 return !(size
> 8 || (size
& (size
- 1)));
775 /* Return the number of registers needed to return the struct, or 0 if
776 returning via struct pointer. */
777 ST_FUNC
int gfunc_sret(CType
*vt
, int variadic
, CType
*ret
, int *ret_align
, int *regsize
)
780 *ret_align
= 1; // Never have to re-align return values for x86-64
782 size
= type_size(vt
, &align
);
783 if (!using_regs(size
))
797 static int is_sse_float(int t
) {
800 return bt
== VT_DOUBLE
|| bt
== VT_FLOAT
;
803 static int gfunc_arg_size(CType
*type
) {
805 if (type
->t
& (VT_ARRAY
|VT_BITFIELD
))
807 return type_size(type
, &align
);
void gfunc_call(int nb_args)
    int size, r, args_size, i, d, bt, struct_size;
    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);
        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */
        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            gen_offs_sp(0x8d, r, struct_size);
            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
        } else if (bt == VT_LDOUBLE) {
            gen_offs_sp(0xdb, 0x107, struct_size);
        if (func_scratch < struct_size)
            func_scratch = struct_size;

    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (vtop->type.t & VT_BTYPE);
        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            gen_offs_sp(0x8d, d, struct_size);
            gen_offs_sp(0x89, d, arg*8);
            d = arg_prepare_reg(arg);
            gen_offs_sp(0x8d, d, struct_size);
        if (is_sse_float(vtop->type.t)) {
            if (tcc_state->nosse)
                tcc_error("SSE disabled");
            gv(RC_XMM0); /* only use one float register */
            /* movq %xmm0, j*8(%rsp) */
            gen_offs_sp(0xd60f66, 0x100, arg*8);
            /* movaps %xmm0, %xmmN */
            o(0xc0 + (arg << 3));
            d = arg_prepare_reg(arg);
            /* mov %xmm0, %rxx */
            o(0xc0 + REG_VALUE(d));
        if (bt == VT_STRUCT) {
            vtop->type.ref = NULL;
            vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                : size > 1 ? VT_SHORT : VT_BYTE;
            gen_offs_sp(0x89, r, arg*8);
            d = arg_prepare_reg(arg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
    /* Copy R10 and R11 into RCX and RDX, respectively */
        o(0xd1894c); /* mov %r10, %rcx */
        o(0xda894c); /* mov %r11, %rdx */

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x0548), gen_le32(func_alloca), func_alloca = ind - 4;
    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f);  /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f);  /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089); /* mov %eax,%eax */
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int addr, reg_param_index, bt, size;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_var = (sym->f.func_type == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type, VT_LLOCAL | VT_LVAL, addr);
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);

    while (reg_param_index < REGN) {
        if (func_type->ref->f.func_type == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
/* generate function epilog */
void gfunc_epilog(void)
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */

    /* add the "func_scratch" area after each alloca seen */
    while (func_alloca) {
        unsigned char *ptr = cur_text_section->data + func_alloca;
        func_alloca = read32le(ptr);
        write32le(ptr, func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
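/* Added note (not in the original source): gfunc_prolog() only reserves
   FUNC_PROLOG_SIZE bytes and remembers the position in func_sub_sp_offset;
   the real prolog (push %rbp / mov %rsp,%rbp / sub of the final frame size)
   is written here in the epilog, once the total amount of locals and scratch
   space is known, by temporarily rewinding 'ind' to that saved offset. */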
static void gadd_sp(int val)
    if (val == (char)val) {
        oad(0xc48148, val); /* add $xxx, %rsp */

typedef enum X86_64_Mode {
    x86_64_mode_integer,
static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
    else if (a == x86_64_mode_none)
    else if (b == x86_64_mode_none)
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    return x86_64_mode_sse;
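/* Added example (not in the original source): under these merge rules a
   small aggregate such as  struct { float x; int y; }  classifies as
   integer, because merge(sse, integer) yields integer, while
   struct { double a, b; }  stays sse since both members classify as sse.
   The calling code then picks general purpose or SSE argument registers
   accordingly. */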
static X86_64_Mode classify_x86_64_inner(CType *ty)
    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;
        return x86_64_mode_integer;
    case VT_DOUBLE: return x86_64_mode_sse;
    case VT_LDOUBLE: return x86_64_mode_x87;
        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        mode = x86_64_mode_integer;
    size = type_size(ty, &align);
    *psize = (size + 7) & ~7;
    *palign = (align + 7) & ~7;
        mode = x86_64_mode_memory;
        mode = classify_x86_64_inner(ty);
    case x86_64_mode_integer:
        ret_t = (size > 4) ? VT_LLONG : VT_INT;
    case x86_64_mode_x87:
    case x86_64_mode_sse:
        ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
    default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
ST_FUNC int classify_x86_64_va_arg(CType *ty)
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
    return arg_regs[idx];
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args], *onstack = _onstack;

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
        } else if (mode == x86_64_mode_none) {
            if (align == 16 && (stack_adjust &= 15)) {
            stack_adjust += size;

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;

    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        /* Possibly adjust stack to align SSE boundary. We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it. */
            o(0x50); /* push %rax; aka sub $8,%rsp */
        if (onstack[i] == 2)
        switch (vtop->type.t & VT_BTYPE) {
            /* allocate the necessary size on stack */
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            assert(mode == x86_64_mode_sse);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0x04 + REG_VALUE(r)*8);
            assert(mode == x86_64_mode_integer);
            /* XXX: implicit cast ? */
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */

    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0xc1 + ((sse_reg+1) << 3));
                assert(reg_count == 1);
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
        } else if (mode == x86_64_mode_integer) {
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));

    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
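/* Added note (not in the original source): the final "mov ..., %eax" follows
   the System V AMD64 variadic convention, where %al must hold an upper bound
   on the number of vector registers actually used for the call; non-prototyped
   and ellipsis calls therefore get this extra instruction before the call. */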
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    if (sym->f.func_type == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                seen_reg_num += reg_count;
            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                seen_sse_num += reg_count;

        /* movl $0x????????, -0x10(%rbp) */
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        gen_le32(seen_stack_size);
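/* Added note (not in the original source, an interpretation of the stores
   above): for variadic functions these three 32-bit immediates appear to seed
   the general-register offset, SSE-register offset and stack-argument offset
   fields that tcc's va_arg support reads back from the frame, matching the
   "must be synced with stdarg.h" remark earlier in this file. */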
        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;

        default: break; /* nothing to be done for x86_64_mode_none */
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948); /* mov  %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
/* generate function epilog */
void gfunc_epilog(void)
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        gen_static_call(TOK___bound_local_new);

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
/* generate a jump to a label */
    return gjmp2(0xe9, t);

/* generate a jump to a fixed address */
void gjmp_addr(int a)
        oad(0xe9, a - ind - 5);

ST_FUNC void gtst_addr(int inv, int a)
    int v = vtop->r & VT_VALMASK;
        inv ^= (vtop--)->c.i;
        oad(inv - 16, a - 4);
    } else if ((v & ~1) == VT_JMP) {
        if ((v & 1) != inv) {
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
    int v = vtop->r & VT_VALMASK;
    if (nocode_wanted) {
    } else if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a); /* jp +6 */
                t = gjmp2(0x8a, t); /* jp t */
        t = gjmp2((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            while ((n1 = read32le(cur_text_section->data + n)))
            write32le(cur_text_section->data + n, t);
/* generate an integer binary operation */
void gen_opi(int op)
    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    case TOK_ADDC1: /* add with carry generation */
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            gv2(RC_INT, RC_INT);
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        if (op >= TOK_ULT && op <= TOK_GT) {
    case TOK_SUBC1: /* sub with carry generation */
    case TOK_ADDC2: /* add with carry use */
    case TOK_SUBC2: /* sub with carry use */
        gv2(RC_INT, RC_INT);
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        opc = 0xc0 | (opc << 3);
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
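/* Added note (not in the original source): before the divide, %rdx must hold
   the upper half of the dividend.  For unsigned division it is zeroed with
   "xor %edx,%edx" (0xd231, emitted low byte first), for signed division it is
   sign-extended from %rax with cqto/cltd (0x99); the 0xf7 group then encodes
   div (ModRM 0xf0+reg) or idiv (ModRM 0xf8+reg), and the result is taken from
   %rdx instead of %rax when the operator is % or TOK_UMOD. */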
void gen_opl(int op)

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
    int a, ft, fc, swapped, r;
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
            else if (op == TOK_EQ || op == TOK_NE)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fc80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                o(0x45c4f6); /* test $0x45, %ah */
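/* Added note (not in the original source): after fnstsw the x87 condition
   codes C0, C2 and C3 land in %ah as bits 0, 2 and 6, which is where the
   0x45 and 0x40 masks above come from: 0x45 selects all three flags (an
   unordered result sets every one of them) and 0x40 alone corresponds to
   the "equal" outcome. */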
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
        o(0xde); /* fxxxp %st, %st(1) */
    if (op >= TOK_ULT && op <= TOK_GT) {
        /* if saved lvalue, then we must reload it */
        if ((r & VT_VALMASK) == VT_LLOCAL) {
            r = get_reg(RC_INT);
            v1.r = VT_LOCAL | VT_LVAL;
        if (op == TOK_EQ || op == TOK_NE) {
            if (op == TOK_LE || op == TOK_LT)
            if (op == TOK_LE || op == TOK_GE) {
                op = 0x93; /* setae */
                op = 0x97; /* seta */
        assert(!(vtop[-1].r & VT_LVAL));
        if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
        if (op == TOK_EQ || op == TOK_NE)
            o(0x2e0f); /* ucomisd */
            o(0x2f0f); /* comisd */
        if (vtop->r & VT_LVAL) {
            gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
        vtop->c.i = op | 0x100;
        assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
        assert((ft & VT_BTYPE) != VT_LDOUBLE);
        /* if saved lvalue, then we must reload it */
        if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
            r = get_reg(RC_INT);
            v1.r = VT_LOCAL | VT_LVAL;
        assert(!(vtop[-1].r & VT_LVAL));
        assert(vtop->r & VT_LVAL);
        if ((ft & VT_BTYPE) == VT_DOUBLE) {
        if (vtop->r & VT_LVAL) {
            gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        int r = get_reg(RC_FLOAT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
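/* Added note (not in the original source): long double targets go through
   the x87 fild path above, while float/double targets use SSE: the 0xf2
   prefix selects cvtsi2sd and 0xf3 (0xf2 + 1) selects cvtsi2ss, and the
   unsigned-int/long-long check appears to select a 64-bit source operand so
   the same instruction handles those cases correctly. */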
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
    int ft, bt, size, r;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
    } else if (bt == VT_DOUBLE) {
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
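/* Added note (not in the original source): long double values are first
   narrowed to double, then the truncating SSE conversions cvttss2si /
   cvttsd2si (opcode 0f 2c with an f3 or f2 prefix) do the float-to-integer
   step; the "tt" variants round toward zero, which matches C's conversion
   semantics, and the REX.W bit from orex() selects a 64-bit destination when
   the target type is 8 bytes wide. */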
/* computed goto support */

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    r = gv(RC_INT); /* allocation size */
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/