/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_ASM_REGS 16
#define CONFIG_TCC_ASM
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code, which
   makes assumptions about this ordering). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_ST0     0x0080 /* only for long double */
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
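/* Worked example (added for illustration, not part of the original
   source): for a hypothetical register number 10 (r10 in the x86-64
   encoding), REX_BASE(10) == 1 is the extension bit that ends up in the
   REX prefix, and REG_VALUE(10) == 2 is the low 3-bit field that goes
   into the ModRM byte. Registers 0-7 (rax..rdi, xmm0..xmm7) have
   REX_BASE == 0 and need no REX extension bit. */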
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS
/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
ST_FUNC void g(int c)
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
ST_FUNC void o(unsigned int c)

ST_FUNC void gen_le16(int v)

ST_FUNC void gen_le32(int c)

ST_FUNC void gen_le64(int64_t c)
static void orex(int ll, int r, int r2, int b)
    if ((r & VT_VALMASK) >= VT_CONST)
    if ((r2 & VT_VALMASK) >= VT_CONST)
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
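/* Note added for illustration (not in the original source): the REX
   prefix byte has the layout 0100WRXB. In the o() call above, bit 0 (B,
   extension of the ModRM r/m or opcode register field) comes from
   REX_BASE(r), bit 2 (R, extension of the ModRM reg field) comes from
   REX_BASE(r2), and bit 3 (W, 64-bit operand size) comes from ll.
   Bit 1 (X, SIB index extension) is never set here. For instance, a
   64-bit operation on two low registers emits just 0x48 (REX.W). */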
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
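/* Illustrative note (added, not original): each pending forward branch
   keeps the offset of the previous patch site in its 4-byte
   displacement field, so read32le(ptr) recovers the next site to fix
   up, while write32le(ptr, a - t - 4) installs the real rel32 value:
   the target a minus the end of the displacement field (t + 4). A jump
   emitted at t = 0x100 resolving to a = 0x120 gets displacement 0x1c. */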
static int is64_type(int t)
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)
ST_FUNC void gen_addr32(int r, Sym *sym, long c)
    greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
    greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, long c)
    greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
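/* Added explanatory note (not in the original source): R_X86_64_PC32 is
   resolved as S + A - P, where P is the address of the 32-bit field
   itself. Because the processor measures the displacement from the end
   of the instruction, 4 bytes past the start of that field, the addend
   passed to greloca() is biased by -4 here; c is then adjusted so the
   word subsequently emitted into the field (elided above) stays
   consistent with that bias. */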
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);

    /* we use add c, %xxx for displacement */
    o(0xc0 + REG_VALUE(r));
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
            gen_gotpcrel(r, sym, c);
            gen_addrpc32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
            /* short reference */
        oad(0x85 | op_reg, c);
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
            g(0x80 | op_reg | REG_VALUE(r));
            g(0x00 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
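/* Added explanatory note (not in the original source): op_reg is
   pre-shifted into bits 5..3 because a ModRM byte is laid out as
   mod(7..6) reg(5..3) r/m(2..0). The literals above then read: 0x05 ==
   mod 00, r/m 101 (RIP-relative addressing in 64-bit mode); 0x85 ==
   mod 10, r/m 101 (rbp base with a 32-bit displacement); 0x80/0x00 ==
   mod 10/00 with the base register number supplied by REG_VALUE(r). */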
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
    gen_modrm_impl(op_reg, r, sym, c, 0);
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
    int v, t, ft, fc, fr;

    sv = pe_getimport(sv, &v2);

    ft = sv->type.t & ~VT_DEFSIGN;

    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
    if (v == VT_LLOCAL) {
        v1.r = VT_LOCAL | VT_LVAL;
        if (!(reg_classes[fr] & (RC_INT|RC_R11)))
            fr = get_reg(RC_INT);

    /* Like GCC we can load from small enough properly sized
       structs and unions as well.
       XXX maybe move to generic operand handling, but should
       occur only with asm, so tccasm.c might also be a better place */
    if ((ft & VT_BTYPE) == VT_STRUCT) {
        switch (type_size(&sv->type, &align)) {
            case 1: ft = VT_BYTE; break;
            case 2: ft = VT_SHORT; break;
            case 4: ft = VT_INT; break;
            case 8: ft = VT_LLONG; break;
                tcc_error("invalid aggregate type for register load");
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            gen_modrm64(b, r, fr, sv->sym, fc);
            gen_modrm(r, fr, sv->sym, fc);

            o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
            gen_addrpc32(fr, sv->sym, fc);

            if (sv->sym->type.t & VT_STATIC) {
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);

                o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                gen_gotpcrel(r, sv->sym, fc);
        } else if (is64_type(ft)) {
            orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
            orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
    } else if (v == VT_LOCAL) {
        orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
        gen_modrm(r, VT_LOCAL, sv->sym, fc);
    } else if (v == VT_CMP) {
        if ((fc & ~0x100) != TOK_NE)
            oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */

            /* This was a float compare.  If the parity bit is
               set the result was unordered, meaning false for everything
               except TOK_NE, and true for TOK_NE.  */
            o(0x037a + (REX_BASE(r) << 8));
        orex(0,r,0, 0x0f); /* setxx %br */
        o(0xc0 + REG_VALUE(r));
    } else if (v == VT_JMP || v == VT_JMPI) {
        oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
        o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
        oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
            /* gen_cvt_ftof(VT_DOUBLE); */
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmmN */
            o(0x44 + REG_VALUE(r)*8); /* %xmmN */

            assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
            if ((ft & VT_BTYPE) == VT_FLOAT) {
                assert((ft & VT_BTYPE) == VT_DOUBLE);
            o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
        } else if (r == TREG_ST0) {
            assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
            /* gen_cvt_ftof(VT_LDOUBLE); */
            /* movsd %xmmN,-0x10(%rsp) */
            o(0x44 + REG_VALUE(r)*8); /* %xmmN */
            o(0xf02444dd); /* fldl -0x10(%rsp) */

            o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
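/* Worked example, added for illustration (not in the original source):
   loading a plain 32-bit int local through the VT_LVAL path above boils
   down to gen_modrm(r, fr, sv->sym, fc), after the mov opcode selection
   that is elided from this listing. Assuming the disp8 form of the
   rbp-based ModRM byte, a local at offset -8 loaded into eax would be
   encoded as the bytes 8b 45 f8, i.e. mov -0x8(%rbp),%eax. */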
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)

    /* store the REX prefix in this variable when PIC is enabled */

    v = pe_getimport(v, &v2);

    fr = v->r & VT_VALMASK;

    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x7e0f); /* movd */
    } else if (bt == VT_DOUBLE) {
        o(0xd60f); /* movq */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
    if (bt == VT_BYTE || bt == VT_BOOL)
    else if (is64_type(bt))

    /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever really get here? */
            o(0xc0 + fr + r * 8); /* mov r, fr */
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we ever really get here? */
            o(0xc0 + fr + r * 8); /* mov r, fr */
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        if (vtop->r & VT_SYM) {
            /* relocation case */
                greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
                greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
        /* otherwise, indirect call */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;

static void gen_static_call(int v)
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
    /* save all temporary registers */

    /* prepare fast x86_64 function call */
    o(0xc68948); // mov  %rax,%rsi ## second arg in %rsi, this must be size

    o(0xc78948); // mov  %rax,%rdi ## first arg in %rdi, this must be ptr

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
        else if (vtop->r & VT_LVAL_SHORT)

    size = type_size(&vtop->type, &align);
    case 1: func = TOK___bound_ptr_indir1; break;
    case 2: func = TOK___bound_ptr_indir2; break;
    case 4: func = TOK___bound_ptr_indir4; break;
    case 8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
        tcc_error("unhandled size when dereferencing bounded pointer");

    sym = external_global_sym(func, &func_old_type, 0);
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
    return arg_regs[idx];
}

static int func_scratch;
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
    orex(1,0,r & 0x100 ? 0 : r, b);
        o(0x2444 | (REG_VALUE(r) << 3));
        o(0x2484 | (REG_VALUE(r) << 3));
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    *ret_align = 1; // Never have to re-align return values for x86-64

    size = type_size(vt, &align);
    if (size > 8 || (size & (size - 1)))
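/* Added note (not in the original source): the test above encodes the
   Win64 convention that only aggregates of exactly 1, 2, 4 or 8 bytes
   are returned directly in RAX; any other size, such as a 3-byte or
   16-byte struct, fails either the size > 8 check or the power-of-two
   check (size & (size - 1)) and is returned through a hidden pointer
   argument instead. */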
static int is_sse_float(int t) {
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    if (type->t & (VT_ARRAY|VT_BITFIELD))
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
    int size, r, args_size, i, d, bt, struct_size;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            gen_offs_sp(0x8d, r, struct_size);

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
        } else if (bt == VT_LDOUBLE) {
            gen_offs_sp(0xdb, 0x107, struct_size);

    if (func_scratch < struct_size)
        func_scratch = struct_size;
833 for(i
= 0; i
< nb_args
; i
++) {
835 bt
= (vtop
->type
.t
& VT_BTYPE
);
837 size
= gfunc_arg_size(&vtop
->type
);
839 /* align to stack align size */
840 size
= (size
+ 15) & ~15;
843 gen_offs_sp(0x8d, d
, struct_size
);
844 gen_offs_sp(0x89, d
, arg
*8);
846 d
= arg_prepare_reg(arg
);
847 gen_offs_sp(0x8d, d
, struct_size
);
851 if (is_sse_float(vtop
->type
.t
)) {
852 if (tcc_state
->nosse
)
853 tcc_error("SSE disabled");
854 gv(RC_XMM0
); /* only use one float register */
856 /* movq %xmm0, j*8(%rsp) */
857 gen_offs_sp(0xd60f66, 0x100, arg
*8);
859 /* movaps %xmm0, %xmmN */
861 o(0xc0 + (arg
<< 3));
862 d
= arg_prepare_reg(arg
);
863 /* mov %xmm0, %rxx */
866 o(0xc0 + REG_VALUE(d
));
869 if (bt
== VT_STRUCT
) {
870 vtop
->type
.ref
= NULL
;
871 vtop
->type
.t
= size
> 4 ? VT_LLONG
: size
> 2 ? VT_INT
872 : size
> 1 ? VT_SHORT
: VT_BYTE
;
877 gen_offs_sp(0x89, r
, arg
*8);
879 d
= arg_prepare_reg(arg
);
880 orex(1,d
,r
,0x89); /* mov */
881 o(0xc0 + REG_VALUE(r
) * 8 + REG_VALUE(d
));
889 /* Copy R10 and R11 into RCX and RDX, respectively */
891 o(0xd1894c); /* mov %r10, %rcx */
893 o(0xda894c); /* mov %r11, %rdx */
    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f);  /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f);  /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089);  /* mov %eax,%eax */
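/* Added note (not in the original source): o() emits its argument
   least-significant byte first, so o(0xc0b60f) produces the byte
   sequence 0f b6 c0, i.e. movzbl %al,%eax. The widening is done on the
   caller side because the ABI does not oblige the callee to clear or
   sign-extend the upper bits of a char/short return value held in
   %eax, as the comment above points out. */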
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int addr, reg_param_index, bt, size;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    sym = func_type->ref;
    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type, VT_LLOCAL | VT_LVAL, addr);
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
/* generate function epilog */
void gfunc_epilog(void)
    if (func_ret_sub == 0) {
        g(func_ret_sub >> 8);

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
        o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
        o(0xec8148);  /* sub rsp, stacksize */

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
static void gadd_sp(int val)
    if (val == (char)val) {
        oad(0xc48148, val); /* add $xxx, %rsp */
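/* Added note (not in the original source): o(0xc48148) emits the bytes
   48 81 c4, i.e. the REX.W add $imm32,%rsp form, and oad() appends the
   32-bit immediate. The branch guarded by val == (char)val (its body is
   elided in this listing) presumably uses the shorter sign-extended
   imm8 encoding instead, since the adjustment fits in a signed byte. */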
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
    else if (a == x86_64_mode_none)
    else if (b == x86_64_mode_none)
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
        return x86_64_mode_sse;
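/* Added explanatory note (not in the original source): the chain above
   merges the classes of two struct fields. memory on either side wins,
   then integer, and an x87 class that survives to the last test (i.e.
   paired with sse) degrades to memory; only sse paired with sse stays
   sse. For example, a struct { int a; float b; } merges integer with
   sse and is therefore classified as integer by this simplified
   scheme. */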
static X86_64_Mode classify_x86_64_inner(CType *ty)
    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_ENUM: return x86_64_mode_integer;

    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        mode = x86_64_mode_integer;

    size = type_size(ty, &align);
    *psize = (size + 7) & ~7;
    *palign = (align + 7) & ~7;

        mode = x86_64_mode_memory;
        mode = classify_x86_64_inner(ty);

    case x86_64_mode_integer:
        ret_t = (size > 4) ? VT_LLONG : VT_INT;

    case x86_64_mode_x87:

    case x86_64_mode_sse:
        ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;

    default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
ST_FUNC int classify_x86_64_va_arg(CType *ty)
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
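/* Added usage note (not in the original source): stdarg.h uses this
   classification to decide how va_arg fetches the next argument: an int
   or pointer comes back as __va_gen_reg (read from the saved
   general-register area), a double as __va_float_reg (saved XMM area),
   and anything classified as memory, such as a large struct, as
   __va_stack (read from the overflow area on the stack). */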
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
    return arg_regs[idx];
}
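/* Added explanatory note (not in the original source): argument slots 2
   and 3 correspond to %rdx and %rcx, but those registers are also
   scratch registers that gv() may clobber while later arguments are
   being evaluated. They are therefore staged in %r10 and %r11 here and
   only copied into %rdx/%rcx right before the call, as gfunc_call does
   below with the mov %r10,%rdx / mov %r11,%rcx pair. */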
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
    int size, align, r, args_size, stack_adjust, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char _onstack[nb_args], *onstack = _onstack;

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
        } else if (mode == x86_64_mode_none) {
            if (align == 16 && (stack_adjust &= 15)) {
            stack_adjust += size;

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");
    /* fetch cpu flag before generating any code */
    if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;

    for (i = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);

        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it. */
            o(0x50); /* push %rax; aka sub $8,%rsp */
        if (onstack[i] == 2)

        switch (vtop->type.t & VT_BTYPE) {
            /* allocate the necessary size on stack */
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);

            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */

            assert(mode == x86_64_mode_sse);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0x04 + REG_VALUE(r)*8);

            assert(mode == x86_64_mode_integer);
            /* XXX: implicit cast ? */
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0xc1 + ((sse_reg+1) << 3));
                assert(reg_count == 1);
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
        } else if (mode == x86_64_mode_integer) {
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));

    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */

    if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
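/* Added note (not in the original source): for old-style or variadic
   callees the SysV x86-64 ABI expects %al to hold an upper bound on the
   number of vector registers actually used for arguments, which is why
   the mov above loads min(nb_sse_args, 8) into %eax before the call
   instruction is emitted. */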
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                seen_reg_num += reg_count;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                seen_sse_num += reg_count;

        /* movl $0x????????, -0x10(%rbp) */
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);

        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;

        default: break; /* nothing to be done for x86_64_mode_none */

        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948);  /* mov  %rax,%rdi  ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
/* generate function epilog */
void gfunc_epilog(void)
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        gen_static_call(TOK___bound_local_new);

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloca(cur_text_section, sym_data, ind + 1, R_X86_64_64, 0);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855);  /* push %rbp, mov %rsp, %rbp */
    o(0xec8148);  /* sub rsp, stacksize */
/* generate a jump to a label */
    return gjmp2(0xe9, t);

/* generate a jump to a fixed address */
void gjmp_addr(int a)
        oad(0xe9, a - ind - 5);
ST_FUNC void gtst_addr(int inv, int a)
    int v = vtop->r & VT_VALMASK;
    inv ^= (vtop--)->c.i;
        oad(inv - 16, a - 4);
    } else if ((v & ~1) == VT_JMP) {
        if ((v & 1) != inv) {
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
    int v = vtop->r & VT_VALMASK;

    if (nocode_wanted) {
    } else if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump.  */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a);  /* jp +6 */
                t = gjmp2(0x8a, t); /* jp t */
        t = gjmp2((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
                while ((n1 = read32le(cur_text_section->data + n)))
                write32le(cur_text_section->data + n, t);
/* generate an integer binary operation */
void gen_opi(int op)
    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    case TOK_ADDC1: /* add with carry generation */
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            gv2(RC_INT, RC_INT);
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        if (op >= TOK_ULT && op <= TOK_GT) {
    case TOK_SUBC1: /* sub with carry generation */
    case TOK_ADDC2: /* add with carry use */
    case TOK_SUBC2: /* sub with carry use */
        gv2(RC_INT, RC_INT);
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
= 0xc0 | (opc
<< 3);
1780 orex(ll
, r
, 0, 0xc1); /* shl/shr/sar $xxx, r */
1781 o(opc
| REG_VALUE(r
));
1782 g(vtop
->c
.i
& (ll
? 63 : 31));
1784 /* we generate the shift in ecx */
1785 gv2(RC_INT
, RC_RCX
);
1787 orex(ll
, r
, 0, 0xd3); /* shl/shr/sar %cl, r */
1788 o(opc
| REG_VALUE(r
));
1801 /* first operand must be in eax */
1802 /* XXX: need better constraint for second operand */
1803 gv2(RC_RAX
, RC_RCX
);
1808 orex(ll
, 0, 0, uu
? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
1809 orex(ll
, fr
, 0, 0xf7); /* div fr, %eax */
1810 o((uu
? 0xf0 : 0xf8) + REG_VALUE(fr
));
1811 if (op
== '%' || op
== TOK_UMOD
)
1823 void gen_opl(int op
)
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
    int a, ft, fc, swapped, r;
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {

    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {

    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
            else if (op == TOK_EQ || op == TOK_NE)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                o(0x45c4f6); /* test $0x45, %ah */
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            o(0xde); /* fxxxp %st, %st(1) */
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;

            if (op == TOK_EQ || op == TOK_NE) {
                if (op == TOK_LE || op == TOK_LT)
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                    op = 0x97; /* seta */

            assert(!(vtop[-1].r & VT_LVAL));
            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);

            vtop->c.i = op | 0x100;
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;

            assert(!(vtop[-1].r & VT_LVAL));
            assert(vtop->r & VT_LVAL);

            if ((ft & VT_BTYPE) == VT_DOUBLE) {

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */

        int r = get_reg(RC_FLOAT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
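/* Worked example, added for illustration (not in the original source):
   on the SSE path, converting a signed int held in %eax to a double in
   %xmm0 starts with the prefix byte 0xf2 emitted above (0xf3 would
   select the float form), continues with the cvtsi2sd/cvtsi2ss opcode
   bytes (elided from this listing), and ends with the ModRM byte from
   the last o() call; with both register numbers 0 that byte is 0xc0,
   giving f2 0f 2a c0, i.e. cvtsi2sd %eax,%xmm0. */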
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */

        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
    int ft, bt, size, r;

    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
    } else if (bt == VT_DOUBLE) {
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
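/* Added note (not in the original source): a long double input is first
   demoted to double via gen_cvt_ftof(VT_DOUBLE) so the common SSE path
   below can handle it. The conversion then uses the truncating
   cvttss2si/cvttsd2si forms, and orex(size == 8, ...) adds a REX.W
   prefix so the destination register is 64-bit when the target type is
   a long long rather than an int. */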
/* computed goto support */

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp) */
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */

    r = gv(RC_INT); /* allocation size */
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/