/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_REGS         25

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX /* function return: integer register */
#define RC_LRET    RC_RDX /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN     16
/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_X86_64_32
#define R_DATA_PTR  R_X86_64_64
#define R_JMP_SLOT  R_X86_64_JUMP_SLOT
#define R_GLOB_DAT  R_X86_64_GLOB_DAT
#define R_COPY      R_X86_64_COPY

#define ELF_START_ADDR 0x400000
#define ELF_PAGE_SIZE  0x200000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
ST_FUNC void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}

ST_FUNC void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}

ST_FUNC void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}

ST_FUNC void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
static void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
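
/* Added commentary (illustration, not from the original source): the REX
   prefix byte is 0100WRXB.  orex(1, TREG_R8, 0, 0x89), for example, emits
   0x49 0x89: ll=1 sets REX.W for a 64-bit operand size and REX_BASE(r8)=1
   sets REX.B, extending the ModRM r/m field so r8-r15 become reachable.
   A 32-bit operation on rax..rdi emits no prefix at all. */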
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
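
/* Added commentary: unresolved forward jumps are kept as a linked list
   threaded through the 32-bit displacement slots themselves; each slot
   holds the offset of the next pending jump and 0 ends the chain.
   gsym_addr() walks the chain and rewrites every slot with the final
   PC-relative displacement 'a - t - 4'. */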
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}
/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;
    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    write32le(cur_text_section->data + ind, s);
    s = ind;
    ind = ind1;
    return s;
}
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32, c), c=0;
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
#else
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
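
/* Added commentary (illustration, not from the original source): a ModRM
   byte is mod(2) | reg(3) | r/m(3).  For r = VT_LOCAL and c = -8 the code
   above emits 0x45 | op_reg followed by the byte -8, i.e. mod=01 (disp8)
   with rbp as base: "op reg, -8(%rbp)".  Offsets outside -128..127 use
   the mod=10 (disp32) form 0x85 | op_reg instead. */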
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE.  */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100f);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
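
/* Added commentary: the VT_LLOCAL case above handles a spilled lvalue,
   i.e. the pointer itself lives in a stack slot; it is first loaded into
   an integer register and the real access is then retried through that
   register. */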
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    ft = v->type.t;
    fc = v->c.i;
    fr = v->r & VT_VALMASK;
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever reach this case? */
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever reach this case? */
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
#ifdef TCC_TARGET_PE
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        }
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
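
/* Added commentary (illustration, not from the original source): a direct
   call comes out as 0xe8 plus a 32-bit displacement that the linker fills
   through the PC32/PLT32 relocation placed at ind + 1; the indirect form
   through r11 is encoded as 41 ff d3 (REX.B, opcode ff /2). */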
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size
    vtop--;

    gv(RC_RAX);
    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}

/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
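
/* Added commentary: gen_bounded_ptr_add() records the offset of its own
   relocation in vtop->c.i so that gen_bounded_ptr_deref() can later
   retarget the very same call site to the size-specific
   __bound_ptr_indirN checker once the access width is known. */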
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch;
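
/* Added commentary (illustration, not from the original source): under the
   Win64 convention arg_prepare_reg(0) and arg_prepare_reg(1) stage values
   in r10 and r11, which gfunc_call() copies into rcx and rdx just before
   the call; indices 2 and 3 map directly to r8 and r9 via arg_regs[]. */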
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (size > 8) {
        return 0;
    } else if (size > 4) {
        ret->ref = NULL;
        ret->t = VT_LLONG;
        return 1;
    } else if (size > 2) {
        ret->ref = NULL;
        ret->t = VT_INT;
        return 1;
    } else if (size > 1) {
        ret->ref = NULL;
        ret->t = VT_SHORT;
        return 1;
    } else {
        ret->ref = NULL;
        ret->t = VT_BYTE;
        return 1;
    }
}
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);

    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f);  /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
        o(0x98); /* cwtl */
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f);  /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089);  /* mov %eax,%eax */
#endif
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
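
/* Added commentary: these are the System V AMD64 merge rules, applied
   here (as a simplification) to the whole aggregate rather than per
   eightbyte: MEMORY beats everything, INTEGER beats SSE, and any x87
   member forces MEMORY.  A struct of two floats therefore stays SSE,
   while mixing a float with an int yields INTEGER. */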
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
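
/* Added commentary (illustration, not from the original source): SysV
   argument indices 0..5 normally map to rdi, rsi, rdx, rcx, r8, r9.
   Indices 2 and 3 are staged in r10 and r11 first (idx + 8) because gv()
   may clobber rdx/rcx while evaluating later arguments; gfunc_call()
   moves them into place right before the call. */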
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            int arg_stored = 1;

            vtop[0] = vtop[-i];
            vtop[-i] = tmp;
            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push $rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position.  */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
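
/* Added commentary: the final "mov nb_sse_args, %eax" above satisfies the
   SysV requirement that, for unprototyped and variadic callees, %al holds
   an upper bound on the number of vector registers actually used. */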
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948);  /* mov  %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}
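
/* Added commentary: the three 32-bit slots stored at -0x10, -0xc and -0x8
   above play the role of the va_list gp_offset, fp_offset and stack
   overflow bookkeeping; their layout must stay in sync with tcc's stdarg
   support, as the comment in classify_x86_64_va_arg() demands. */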
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948);  /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not TCC_TARGET_PE */
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}

ST_FUNC void gtst_addr(int inv, int a)
{
    inv ^= (vtop--)->c.i;
    a -= ind + 2;
    if (a == (char)a) {
        g(inv - 32);
        g(a);
    } else {
        g(0x0f);
        oad(inv - 16, a - 4);
    }
}
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;

    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump.  */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a);  /* jp +6 */
            else {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}

void gen_opl(int op)
{
    gen_opi(op);
}
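
/* Added commentary (illustration, not from the original source): for
   "a + 3" with 'a' in %eax the constant path above emits 83 c0 03, i.e.
   "add $3, %eax": 0x83 is the group-1 opcode taking an imm8, and
   0xc0 | (opc << 3) | reg selects opc=0 (add) with a register
   destination. */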
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fc80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
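
/* Added commentary (illustration, not from the original source): a plain
   int in a register takes the else branch: f3 0f 2a /r is cvtsi2ss (f2
   for cvtsi2sd); the extra 0x48 REX.W is emitted only for long long and
   unsigned int sources so that the full 64-bit value is converted. */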
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
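
/* Added commentary: cvttss2si/cvttsd2si truncate toward zero, which is
   exactly C's float-to-integer conversion; long double is first narrowed
   to double through gen_cvt_ftof(VT_DOUBLE) above. */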
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    /* mov %rsp, r */
    o(0x8948);
    o(0xe0 | REG_VALUE(r));
    vpop();
    vset(type, r, 0);
#endif
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/