/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY
/* number of available registers */
#define NB_REGS 25
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_RAX     0x0004
#define RC_RCX     0x0008
#define RC_RDX     0x0010
#define RC_ST0     0x0080 /* only for long double */
#define RC_R8      0x0100
#define RC_R9      0x0200
#define RC_R10     0x0400
#define RC_R11     0x0800
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20
};
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
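/* Worked example (illustrative, assuming the TREG_* values above, which
   mirror the hardware register numbers): r10 = 10 = 0b1010, so
   REX_BASE(TREG_R10) == 1, meaning the register needs its extension bit
   set in a REX prefix, and REG_VALUE(TREG_R10) == 2, the low three bits
   that go into the ModRM/opcode byte. */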
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */
/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS
/* pointer size, in bytes */
#define PTR_SIZE 8
/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN 16
/******************************************************/

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32  R_X86_64_32
#define R_DATA_PTR R_X86_64_64
#define R_JMP_SLOT R_X86_64_JUMP_SLOT
#define R_COPY     R_X86_64_COPY

#define ELF_START_ADDR 0x400000
#define ELF_PAGE_SIZE  0x200000
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
ST_FUNC void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
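/* Note on the multi-byte convention used throughout this file: o()
   emits the low byte first and stops at the first zero byte, so opcode
   constants are written byte-reversed. For example (illustrative),
   o(0xc08948) emits the byte sequence 48 89 c0, which decodes as
   "mov %rax,%rax". */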
void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
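/* Example (illustrative): orex(1, TREG_RCX, TREG_RAX, 0x89) emits
   0x48 0x89 — a REX.W prefix (0x40 | ll<<3) followed by the mov opcode;
   neither rcx nor rax sets REX_BASE, so no B/R bits are added. With
   r = TREG_R8 the prefix would become 0x49 (REX.W|REX.B). */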
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    while (t) {
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
        t = n;
    }
}
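/* Forward references are kept as a chain threaded through the 32-bit
   displacement fields themselves: each pending slot holds the offset of
   the previous pending slot, with 0 terminating the chain. gsym_addr()
   walks that chain and overwrites every slot with the final PC-relative
   displacement 'a - t - 4'. */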
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad! */
#define psym oad
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}
/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;
    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    write32le(cur_text_section->data + ind, s);
    s = ind;
    ind = ind1;
    return s;
}
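/* Example (illustrative): oad(0xe8, 0) emits a "call rel32" with a zero
   placeholder in the displacement field and returns the offset of that
   placeholder, which callers can later patch directly or link into a
   jump chain resolved by gsym_addr() above. */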
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_32, c), c=0;
    gen_le32(c);
}
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;
    gen_le64(c);
}
/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;
    gen_le32(c-4);
}
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
#else
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
        get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
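/* Example (illustrative): gen_modrm_impl(TREG_RAX, VT_LOCAL, NULL, -8, 0)
   emits 0x45 0xf8 — a ModRM byte with mod=01, reg=000 (%rax), rm=101
   (%rbp), followed by the 8-bit displacement -8; i.e. an "-0x8(%rbp)"
   operand. */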
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t & ~VT_DEFSIGN;
    fc = sv->c.i;

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif

    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.i = fc;
            fr = r;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.i);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare. If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1; /* t = 1 if positive jump */
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
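/* Summary of the cases above: load() materializes an SValue into a
   concrete register — memory operands via mov/movsx/movzx/movss/fldt,
   constants via "mov $imm, r", CPU flags (VT_CMP) via a setcc sequence,
   and pending jump chains (VT_JMP/VT_JMPI) by synthesizing a 0/1
   value around the patched jump target. */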
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    ft = v->type.t;
    fc = v->c.i;
    fr = v->r & VT_VALMASK;
    ft &= ~(VT_VOLATILE | VT_CONSTANT);
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we really ever come here? */
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: do we really ever come here? */
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
#ifdef TCC_TARGET_PE
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
#else
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
#endif
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        }
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
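/* Encoding note (illustrative): the direct case relies on "call rel32"
   being opcode 0xe8 and "jmp rel32" being 0xe9, hence oad(0xe8 + is_jmp, 0).
   The indirect case uses the 0xff opcode group, where /2 selects call
   and /4 selects jmp; (is_jmp << 4) turns the 0xd0 ModRM base into
   0xe0 accordingly. */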
#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
#endif

static void gen_static_call(int v)
{
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    oad(0xe8, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
}

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
{
    /* save all temporary registers */
    save_regs(0);

    /* prepare fast x86_64 function call */
    gv(RC_RAX);
    o(0xc68948); // mov %rax,%rsi ## second arg in %rsi, this must be size
    vswap();

    gv(RC_RAX);
    o(0xc78948); // mov %rax,%rdi ## first arg in %rdi, this must be ptr
    vtop--;

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop++;
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
{
    addr_t func;
    int size, align;
    ElfW(Rela) *rel;
    Sym *sym;

    size = 0;
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
            size = 1;
        else if (vtop->r & VT_LVAL_SHORT)
            size = 2;
    }
    if (!size)
        size = type_size(&vtop->type, &align);
    switch(size) {
    case  1: func = TOK___bound_ptr_indir1; break;
    case  2: func = TOK___bound_ptr_indir2; break;
    case  4: func = TOK___bound_ptr_indir4; break;
    case  8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
    default:
        tcc_error("unhandled size when dereferencing bounded pointer");
        func = 0;
        break;
    }

    sym = external_global_sym(func, &func_old_type, 0);
    if (!sym->c)
        put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}

static int func_scratch;
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    size = type_size(vt, &align);
    if (size > 8) {
        return 0;
    } else if (size > 4) {
        ret->ref = NULL;
        ret->t = VT_LLONG;
        return 1;
    } else if (size > 2) {
        ret->ref = NULL;
        ret->t = VT_INT;
        return 1;
    } else if (size > 1) {
        ret->ref = NULL;
        ret->t = VT_SHORT;
        return 1;
    } else {
        ret->ref = NULL;
        ret->t = VT_BYTE;
        return 1;
    }
}
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}
int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        if (bt == VT_STRUCT || bt == VT_LDOUBLE) {
            size = gfunc_arg_size(&vtop->type);
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11
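/* The 11 bytes reserved here cover the standard frame setup that
   gfunc_epilog() back-patches at func_sub_sp_offset:
   push %rbp (1 byte) + mov %rsp,%rbp (3 bytes) + sub $imm32,%rsp
   (7 bytes) = 11 bytes. */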
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;
static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
{
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
static X86_64_Mode classify_x86_64_inner(CType *ty)
{
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }
    assert(0);
    return 0;
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
{
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *palign = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
ST_FUNC int classify_x86_64_va_arg(CType *ty)
{
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
{
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    *regsize = 8;
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
}
#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            default: break; /* nothing to be done for x86_64_mode_none */
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            int arg_stored = 1;

            vtop[0] = vtop[-i];
            vtop[-i] = tmp;
            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push $rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position.  */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            int d;
            gen_reg -= reg_count;
            r = gv(RC_INT);
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11
static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
            }
            break;
        }
        default: break; /* nothing to be done for x86_64_mode_none */
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948);  /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
    }
#endif
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)
    {
        addr_t saved_ind;
        addr_t *bounds_ptr;
        Sym *sym_data;

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));
        *bounds_ptr = 0;

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        saved_ind = ind;
        ind = func_bound_ind;
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        ind = ind + 5 + 3;
        gen_static_call(TOK___bound_local_new);
        ind = saved_ind;

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */
    }
#endif
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}

#endif /* not PE */
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v = vtop->r & VT_VALMASK;

    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
        {
            /* This was a float compare. If the parity flag is set
               the result was unordered. For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test. We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a); /* jp +6 */
            else
            {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            if (n) {
                while ((n1 = read32le(cur_text_section->data + n)))
                    n = n1;
                write32le(cur_text_section->data + n, t);
                t = vtop->c.i;
            }
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.i;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.i;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.i = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
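/* Note on the paths above: there is no x87 instruction that loads an
   unsigned integer directly, so the unsigned-int case goes through
   memory and is loaded with fildll as a signed 64-bit value — a
   zero-extended 32-bit value is always non-negative, so the result is
   correct. The signed cases simply push the register and fild it. */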
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt, r;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
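/* Example (illustrative): converting a double in %xmm2 to a 64-bit int
   in %rcx emits f2 48 0f 2c ca — the 0xf2 prefix selects cvttsd2si,
   orex() adds REX.W for the 8-byte destination, and the ModRM byte
   0xc0 + src + dst*8 encodes the register pair. */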
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}
/* Save the stack pointer to a stack slot at 'addr' (%rbp-relative) */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp) */
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    vpop();
#endif
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/