/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_ASM_REGS 16
#define CONFIG_TCC_ASM

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_ST0     0x0080 /* only for long double */
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
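/* Illustration (editorial note, not part of the original source): under the
   SysV x86-64 ABI a 16-byte struct of two longs is returned in RAX:RDX
   (RC_IRET/RC_LRET), while a 16-byte struct of two doubles is returned in
   XMM0:XMM1 (RC_FRET/RC_QRET). */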
/* pretty names for the registers */

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
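/* Worked example (editorial note, not in the original): register number 12
   (%r12) splits into REX_BASE(12) == 1, the extension bit that goes into the
   REX prefix, and REG_VALUE(12) == 4, the low three bits that go into the
   ModRM/SIB fields. */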
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE 16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
ST_FUNC void o(unsigned int c)

ST_FUNC void gen_le16(int v)

ST_FUNC void gen_le32(int c)

ST_FUNC void gen_le64(int64_t c)

static void orex(int ll, int r, int r2, int b)
    if ((r & VT_VALMASK) >= VT_CONST)
    if ((r2 & VT_VALMASK) >= VT_CONST)
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
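/* Illustrative note (added, not original text; assumes TREG_R8 carries
   register number 8): orex(1, TREG_R8, TREG_RAX, 0x89) computes the prefix
   0x40 | 1 | (0 << 2) | (1 << 3) == 0x49 (REX.W + REX.B), which together with
   opcode 0x89 and ModRM 0xc0 encodes "mov %rax,%r8". */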
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
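/* Sketch of the idea (editorial note, not from the original file): forward
   jumps to a not-yet-defined label are chained through their own 32-bit
   displacement slots, each slot holding the offset of the previous pending
   jump.  gsym_addr() walks that chain via read32le() and overwrites each slot
   with the PC-relative displacement a - t - 4, i.e. the target minus the
   address of the byte that follows the rel32 field. */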
static int is64_type(int t)
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
              get_tok_str(sym->v, NULL), c, r,
              cur_text_section->data[ind-3],
              cur_text_section->data[ind-2],
              cur_text_section->data[ind-1]);
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);

    /* we use add c, %xxx for displacement */
        o(0xc0 + REG_VALUE(r));
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
                gen_gotpcrel(r, sym, c);
                gen_addrpc32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
            /* short reference */
            oad(0x85 | op_reg, c);
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
            g(0x80 | op_reg | REG_VALUE(r));
            g(0x00 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
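/* ModRM recap (editorial note, not from the original file): the byte built
   here is mod(2 bits) | reg(3 bits) | rm(3 bits).  For example, with
   op_reg == 2 the local-variable case emits 0x85 | (2 << 3) == 0x95, i.e.
   disp32(%rbp) with register 2 (or opcode extension /2) in the reg field,
   and the 0x04/0x25 pair above selects the SIB "absolute disp32" form used
   for constant addresses. */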
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
    gen_modrm_impl(op_reg, r, sym, c, 0);

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
    int v, t, ft, fc, fr;

    sv = pe_getimport(sv, &v2);

    ft = sv->type.t & ~VT_DEFSIGN;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);
#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */

        if (v == VT_LLOCAL) {
            v1.r = VT_LOCAL | VT_LVAL;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            /* If the addend doesn't fit into a signed 32bit value
               we must use a 64bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
    /* Like GCC we can load from small enough properly sized
       structs and unions as well.
       XXX maybe move to generic operand handling, but should
       occur only with asm, so tccasm.c might also be a better place */
    if ((ft & VT_BTYPE) == VT_STRUCT) {
        switch (type_size(&sv->type, &align)) {
            case 1: ft = VT_BYTE; break;
            case 2: ft = VT_SHORT; break;
            case 4: ft = VT_INT; break;
            case 8: ft = VT_LLONG; break;
                tcc_error("invalid aggregate type for register load");

    if ((ft & VT_BTYPE) == VT_FLOAT) {
        r = REG_VALUE(r); /* movd */
    } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
        b = 0x7e0ff3; /* movq */
    } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
        b = 0xdb, r = 5; /* fldt */
    } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
        b = 0xbe0f;   /* movsbl */
    } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
        b = 0xb60f;   /* movzbl */
    } else if ((ft & VT_TYPE) == VT_SHORT) {
        b = 0xbf0f;   /* movswl */
    } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
        b = 0xb70f;   /* movzwl */
        assert(((ft & VT_BTYPE) == VT_INT)
               || ((ft & VT_BTYPE) == VT_LLONG)
               || ((ft & VT_BTYPE) == VT_PTR)
               || ((ft & VT_BTYPE) == VT_FUNC)
        gen_modrm64(b, r, fr, sv->sym, fc);
        gen_modrm(r, fr, sv->sym, fc);
            o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
            gen_addrpc32(fr, sv->sym, fc);
            if (sv->sym->type.t & VT_STATIC) {
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
                o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                gen_gotpcrel(r, sv->sym, fc);
        } else if (is64_type(ft)) {
            orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
            orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
    } else if (v == VT_LOCAL) {
        orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
        gen_modrm(r, VT_LOCAL, sv->sym, fc);
    } else if (v == VT_CMP) {
        if ((fc & ~0x100) != TOK_NE)
            oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            /* This was a float compare.  If the parity bit is
               set the result was unordered, meaning false for everything
               except TOK_NE, and true for TOK_NE. */
            o(0x037a + (REX_BASE(r) << 8));
        orex(0,r,0, 0x0f); /* setxx %br */
        o(0xc0 + REG_VALUE(r));
    } else if (v == VT_JMP || v == VT_JMPI) {
        oad(0xb8 + REG_VALUE(r), t);     /* mov $1, r */
        o(0x05eb + (REX_BASE(r) << 8));  /* jmp after */
        oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
            /* gen_cvt_ftof(VT_DOUBLE); */
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmmN */
            o(0x44 + REG_VALUE(r)*8); /* %xmmN */
            assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
            if ((ft & VT_BTYPE) == VT_FLOAT) {
                assert((ft & VT_BTYPE) == VT_DOUBLE);
            o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
        } else if (r == TREG_ST0) {
            assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
            /* gen_cvt_ftof(VT_LDOUBLE); */
            /* movsd %xmmN,-0x10(%rsp) */
            o(0x44 + REG_VALUE(r)*8); /* %xmmN */
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
    /* store the REX prefix in this variable when PIC is enabled */

    v = pe_getimport(v, &v2);

    fr = v->r & VT_VALMASK;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x7e0f);  /* movd */
    } else if (bt == VT_DOUBLE) {
        o(0xd60f);  /* movq */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9);  /* fld %st(0) */
        if (bt == VT_BYTE || bt == VT_BOOL)
        else if (is64_type(bt))

    /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever really get here? */
            o(0xc0 + fr + r * 8); /* mov r, fr */
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever really get here? */
            o(0xc0 + fr + r * 8); /* mov r, fr */
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
        /* otherwise, indirect call */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;

static void gen_static_call(int v)
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
    /* save all temporary registers */

    /* prepare fast x86_64 function call */
    o(0xc68948); // mov %rax,%rsi ## second arg in %rsi, this must be size
    o(0xc78948); // mov %rax,%rdi ## first arg in %rdi, this must be ptr

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
        else if (vtop->r & VT_LVAL_SHORT)

    size = type_size(&vtop->type, &align);
        case 1: func = TOK___bound_ptr_indir1; break;
        case 2: func = TOK___bound_ptr_indir2; break;
        case 4: func = TOK___bound_ptr_indir4; break;
        case 8: func = TOK___bound_ptr_indir8; break;
        case 12: func = TOK___bound_ptr_indir12; break;
        case 16: func = TOK___bound_ptr_indir16; break;
            tcc_error("unhandled size when dereferencing bounded pointer");

    sym = external_global_sym(func, &func_old_type, 0);
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
    return arg_regs[idx];
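/* Added note: on this Win64 path the mapping is therefore idx 0 -> %r10 and
   idx 1 -> %r11 (temporaries standing in for %rcx/%rdx until just before the
   call, per the comment above), while idx 2 and 3 come straight from
   arg_regs[], i.e. %r8 and %r9. */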
static int func_scratch, func_alloca;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

static void gen_offs_sp(int b, int r, int d)
    orex(1,0,r & 0x100 ? 0 : r, b);
        o(0x2444 | (REG_VALUE(r) << 3));
        o(0x2484 | (REG_VALUE(r) << 3));

static int using_regs(int size)
    return !(size > 8 || (size & (size - 1)));
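/* Added example: using_regs() accepts exactly the power-of-two sizes up to 8,
   so 1, 2, 4 and 8 byte values travel directly in registers or stack slots,
   while 3-byte or 16-byte aggregates (and anything larger than 8) are copied
   to scratch stack space and passed by address instead. */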
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    if (!using_regs(size))

static int is_sse_float(int t) {
    return bt == VT_DOUBLE || bt == VT_FLOAT;

static int gfunc_arg_size(CType *type) {
    if (type->t & (VT_ARRAY|VT_BITFIELD))
    return type_size(type, &align);

void gfunc_call(int nb_args)
    int size, r, args_size, i, d, bt, struct_size;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
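    /* Added note: REGN is 4 on this Win64 path, so this always reserves at
       least 4 * PTR_SIZE == 32 bytes of outgoing space -- the ABI's shadow
       ("home") area that the callee may use to spill its register params. */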
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            gen_offs_sp(0x8d, r, struct_size);

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
        } else if (bt == VT_LDOUBLE) {
            gen_offs_sp(0xdb, 0x107, struct_size);

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
        if (is_sse_float(vtop->type.t)) {
            if (tcc_state->nosse)
                tcc_error("SSE disabled");
                /* movq %xmm0, j*8(%rsp) */
                gen_offs_sp(0xd60f66, 0x100, arg*8);
                /* Load directly to xmmN register */
                d = arg_prepare_reg(arg);
                /* mov %xmmN, %rxx */
                o(0xc0 + arg*8 + REG_VALUE(d));
            if (bt == VT_STRUCT) {
                vtop->type.ref = NULL;
                vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                    : size > 1 ? VT_SHORT : VT_BYTE;
                gen_offs_sp(0x89, r, arg*8);
                d = arg_prepare_reg(arg);
                orex(1,d,r,0x89); /* mov */
                o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));

    /* Copy R10 and R11 into RCX and RDX, respectively */
        o(0xd1894c); /* mov %r10, %rcx */
        o(0xda894c); /* mov %r11, %rdx */

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x0548), gen_le32(func_alloca), func_alloca = ind - 4;
    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f);  /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f);  /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f);  /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848);  /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089);  /* mov %eax,%eax */
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int addr, reg_param_index, bt, size;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_var = (sym->f.func_type == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | lvalue_type(type->t), addr);
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | lvalue_type(type->t), addr);

    while (reg_param_index < REGN) {
        if (func_type->ref->f.func_type == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
/* generate function epilog */
void gfunc_epilog(void)
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    v = (func_scratch + -loc + 15) & -16;

        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */

    /* add the "func_scratch" area after each alloca seen */
    while (func_alloca) {
        unsigned char *ptr = cur_text_section->data + func_alloca;
        func_alloca = read32le(ptr);
        write32le(ptr, func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
static void gadd_sp(int val)
    if (val == (char)val) {
        oad(0xc48148, val); /* add $xxx, %rsp */

typedef enum X86_64_Mode {
    x86_64_mode_integer,

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
    else if (a == x86_64_mode_none)
    else if (b == x86_64_mode_none)
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
        return x86_64_mode_sse;
static X86_64_Mode classify_x86_64_inner(CType *ty)
    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

        return x86_64_mode_integer;

    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        mode = x86_64_mode_integer;

        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

            mode = x86_64_mode_memory;
            mode = classify_x86_64_inner(ty);

        case x86_64_mode_integer:
                ret_t = (size > 4) ? VT_LLONG : VT_INT;

        case x86_64_mode_x87:

        case x86_64_mode_sse:
                ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;

        default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
ST_FUNC int classify_x86_64_va_arg(CType *ty)
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
    return arg_regs[idx];
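/* Added note: on this SysV path the integer argument order is therefore
   %rdi, %rsi, %rdx, %rcx, %r8, %r9; slots 2 and 3 are staged in %r10 and %r11
   first (see the idx comment above) and only copied into %rdx/%rcx right
   before the call, because gv() may clobber %rdx/%rcx while later arguments
   are being evaluated. */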
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args], *onstack = _onstack;

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
        } else if (mode == x86_64_mode_none) {
            if (align == 16 && (stack_adjust &= 15)) {
            stack_adjust += size;

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);

        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it. */
            o(0x50); /* push %rax; aka sub $8,%rsp */

        if (onstack[i] == 2)

        switch (vtop->type.t & VT_BTYPE) {
            /* allocate the necessary size on stack */
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);

            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */

            assert(mode == x86_64_mode_sse);
            o(0x50); /* push %rax */
            /* movq %xmmN, (%rsp) */
            o(0x04 + REG_VALUE(r)*8);

            assert(mode == x86_64_mode_integer);
            /* XXX: implicit cast ? */
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */

    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */
    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers.  Let's use R10 and R11
       instead of them. */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0xc1 + ((sse_reg+1) << 3));
                assert(reg_count == 1);
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
        } else if (mode == x86_64_mode_integer) {
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));

    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    if (sym->f.func_type == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                seen_reg_num += reg_count;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                seen_sse_num += reg_count;

        /* movl $0x????????, -0x10(%rbp) */
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        gen_le32(seen_stack_size);
        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */

        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;

        default: break; /* nothing to be done for x86_64_mode_none */

        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | lvalue_type(type->t), param_addr);
#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
/* generate function epilog */
void gfunc_epilog(void)
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        gen_static_call(TOK___bound_local_new);

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
ST_FUNC void gen_fill_nops(int bytes)

/* generate a jump to a label */
    return gjmp2(0xe9, t);

/* generate a jump to a fixed address */
void gjmp_addr(int a)
        oad(0xe9, a - ind - 5);

ST_FUNC void gtst_addr(int inv, int a)
    int v = vtop->r & VT_VALMASK;
    inv ^= (vtop--)->c.i;
        oad(inv - 16, a - 4);
    } else if ((v & ~1) == VT_JMP) {
        if ((v & 1) != inv) {
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
    int v = vtop->r & VT_VALMASK;

    if (nocode_wanted) {
    } else if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a); /* jp +6 */
                t = gjmp2(0x8a, t); /* jp t */
        t = gjmp2((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
                while ((n1 = read32le(cur_text_section->data + n)))
                write32le(cur_text_section->data + n, t);
/* generate an integer binary operation */
void gen_opi(int op)
    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    case TOK_ADDC1: /* add with carry generation */
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            gv2(RC_INT, RC_INT);
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        if (op >= TOK_ULT && op <= TOK_GT) {
    case TOK_SUBC1: /* sub with carry generation */
    case TOK_ADDC2: /* add with carry use */
    case TOK_SUBC2: /* sub with carry use */
        gv2(RC_INT, RC_INT);
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        opc = 0xc0 | (opc << 3);
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
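        /* Added note: opcode 0xf7 with ModRM /6 (0xf0 + reg) is the unsigned
           DIV form and /7 (0xf8 + reg) the signed IDIV form; the quotient
           lands in %rax and the remainder in %rdx, which is what the
           '%' / TOK_UMOD handling just below relies on. */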
        if (op == '%' || op == TOK_UMOD)

void gen_opl(int op)

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
    int a, ft, fc, swapped, r;
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {

    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {

    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
            else if (op == TOK_EQ || op == TOK_NE)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                o(0x45c4f6); /* test $0x45, %ah */
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            o(0xde); /* fxxxp %st, %st(1) */
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;

            if (op == TOK_EQ || op == TOK_NE) {
                if (op == TOK_LE || op == TOK_LT)
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                    op = 0x97; /* seta */

            assert(!(vtop[-1].r & VT_LVAL));
            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);

            vtop->c.i = op | 0x100;
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;

            assert(!(vtop[-1].r & VT_LVAL));
            assert(vtop->r & VT_LVAL);

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf);   /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf);   /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db);   /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */

        int r = get_reg(RC_FLOAT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT ? 1 : 0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
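        /* Added note: the 0xf2/0xf3 prefix emitted above selects the scalar
           double (cvtsi2sd) or scalar float (cvtsi2ss) form, and the
           unsigned-int / long-long test decides whether the conversion must
           read a full 64-bit source register (REX.W-sized operand). */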
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */

        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
    int ft, bt, size, r;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
    } else if (bt == VT_DOUBLE) {
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
/* computed goto support */

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    r = gv(RC_INT); /* allocation size */
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/