/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_ASM_REGS 16
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about this). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_ST0     0x0080 /* only for long double */
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
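/* Illustrative note (added, not in the original source): a class word simply
   ORs a generic class bit with a register-specific bit, so an entry such as
   (RC_INT | RC_RAX) in reg_classes[] below satisfies both a request for "any
   integer register" (e.g. gv(RC_INT) or get_reg(RC_INT)) and a request for
   %rax specifically (e.g. gv2(RC_RAX, RC_RCX) in gen_opi() below). */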
/* pretty names for the registers */

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16

/* maximum alignment (for aligned attribute support) */

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
ST_FUNC void o(unsigned int c)

ST_FUNC void gen_le16(int v)

ST_FUNC void gen_le32(int c)

ST_FUNC void gen_le64(int64_t c)

static void orex(int ll, int r, int r2, int b)
    if ((r & VT_VALMASK) >= VT_CONST)
    if ((r2 & VT_VALMASK) >= VT_CONST)
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a - t - 4);
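/* Illustrative note (added): forward references to a not-yet-defined label are
   chained through their own 32-bit displacement fields; each field holds the
   position of the previous pending jump (the "next value" read above), and
   patching rewrites every field in that chain with the real pc-relative
   displacement 'a - t - 4'. */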
static int is64_type(int t)
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);

/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)

ST_FUNC void gen_addr32(int r, Sym *sym, long c)
    greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
    greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, long c)
    greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
#ifndef TCC_TARGET_PE
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
              get_tok_str(sym->v, NULL), c, r,
              cur_text_section->data[ind-3],
              cur_text_section->data[ind-2],
              cur_text_section->data[ind-1]
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);

    /* we use add c, %xxx for displacement */
    o(0xc0 + REG_VALUE(r));
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
            gen_gotpcrel(r, sym, c);
            gen_addrpc32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
            /* short reference */
            oad(0x85 | op_reg, c);
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
            g(0x80 | op_reg | REG_VALUE(r));
            g(0x00 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
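/* Illustrative note (added): the bytes built above follow the x86 ModRM
   layout mod:reg:rm.  0x85 | op_reg encodes disp32(%rbp) (mod=10, rm=101),
   0x80 | op_reg | REG_VALUE(r) encodes disp32(reg), and
   0x00 | op_reg | REG_VALUE(r) is a register-indirect reference with no
   displacement (mod=00). */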
/* generate a modrm reference. 'op_reg' contains the additional 3 opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
    gen_modrm_impl(op_reg, r, sym, c, 0);

/* generate a modrm reference. 'op_reg' contains the additional 3 opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
    int v, t, ft, fc, fr;

    sv = pe_getimport(sv, &v2);

    ft = sv->type.t & ~VT_DEFSIGN;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;

        /* we cannot use float registers as a temporary register */
        tr = get_reg(RC_INT) | TREG_MEM;

        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
    if (v == VT_LLOCAL) {
        v1.r = VT_LOCAL | VT_LVAL;
        if (!(reg_classes[fr] & (RC_INT|RC_R11)))
            fr = get_reg(RC_INT);

        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                    tcc_error("invalid aggregate type for register load");

        if ((ft & VT_BTYPE) == VT_FLOAT) {
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            gen_modrm64(b, r, fr, sv->sym, fc);
            gen_modrm(r, fr, sv->sym, fc);
        o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
        gen_addrpc32(fr, sv->sym, fc);
        if (sv->sym->type.t & VT_STATIC) {
            o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
            gen_addrpc32(fr, sv->sym, fc);
            o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
            gen_gotpcrel(r, sv->sym, fc);
    } else if (is64_type(ft)) {
        orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
        orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
    } else if (v == VT_LOCAL) {
        orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
        gen_modrm(r, VT_LOCAL, sv->sym, fc);
    } else if (v == VT_CMP) {
        if ((fc & ~0x100) != TOK_NE)
            oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            /* This was a float compare.  If the parity bit is
               set the result was unordered, meaning false for everything
               except TOK_NE, and true for TOK_NE. */
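            /* Illustrative note (added): x86 float compares (ucomisd/comisd,
               see gen_opf() below) set PF for unordered operands such as NaN,
               so e.g. (x == y) with a NaN input must produce 0 while (x != y)
               must produce 1; the "jp" emitted next fixes up the setcc result
               for that unordered case. */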
            o(0x037a + (REX_BASE(r) << 8));
        orex(0,r,0, 0x0f); /* setxx %br */
        o(0xc0 + REG_VALUE(r));
    } else if (v == VT_JMP || v == VT_JMPI) {
        oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
        o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
        oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
            /* gen_cvt_ftof(VT_DOUBLE); */
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmmN */
            o(0x44 + REG_VALUE(r)*8); /* %xmmN */
            assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
            if ((ft & VT_BTYPE) == VT_FLOAT) {
                assert((ft & VT_BTYPE) == VT_DOUBLE);
            o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
        } else if (r == TREG_ST0) {
            assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
            /* gen_cvt_ftof(VT_LDOUBLE); */
            /* movsd %xmmN,-0x10(%rsp) */
            o(0x44 + REG_VALUE(r)*8); /* %xmmN */
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */

/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
    /* store the REX prefix in this variable when PIC is enabled */

    v = pe_getimport(v, &v2);

    fr = v->r & VT_VALMASK;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x7e0f); /* movd */
    } else if (bt == VT_DOUBLE) {
        o(0xd60f); /* movq */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
    if (bt == VT_BYTE || bt == VT_BOOL)
    else if (is64_type(bt))
    /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
    if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
        gen_modrm64(op64, r, v->r, v->sym, fc);
    } else if (fr != r) {
        /* XXX: do we ever really get here? */
        o(0xc0 + fr + r * 8); /* mov r, fr */
    if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
        gen_modrm(r, v->r, v->sym, fc);
    } else if (fr != r) {
        /* XXX: do we ever really get here? */
        o(0xc0 + fr + r * 8); /* mov r, fr */
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
            /* put an empty PC32 relocation */
            put_elf_reloca(symtab_section, cur_text_section,
                           ind + 1, R_X86_64_PC32, 0, (int)(vtop->c.i-4));
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
        /* otherwise, indirect call */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));

#if defined(CONFIG_TCC_BCHECK)
#ifndef TCC_TARGET_PE
static addr_t func_bound_offset;
static unsigned long func_bound_ind;

static void gen_static_call(int v)
    Sym *sym = external_global_sym(v, &func_old_type, 0);
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);

/* generate a bounded pointer addition */
ST_FUNC void gen_bounded_ptr_add(void)
    /* save all temporary registers */

    /* prepare fast x86_64 function call */
    o(0xc68948); // mov %rax,%rsi ## second arg in %rsi, this must be size
    o(0xc78948); // mov %rax,%rdi ## first arg in %rdi, this must be ptr

    /* do a fast function call */
    gen_static_call(TOK___bound_ptr_add);

    /* returned pointer is in rax */
    vtop->r = TREG_RAX | VT_BOUNDED;

    /* relocation offset of the bounding function call point */
    vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
/* patch pointer addition in vtop so that pointer dereferencing is
   also tested */
ST_FUNC void gen_bounded_ptr_deref(void)
    /* XXX: put that code in generic part of tcc */
    if (!is_float(vtop->type.t)) {
        if (vtop->r & VT_LVAL_BYTE)
        else if (vtop->r & VT_LVAL_SHORT)

    size = type_size(&vtop->type, &align);
    case 1: func = TOK___bound_ptr_indir1; break;
    case 2: func = TOK___bound_ptr_indir2; break;
    case 4: func = TOK___bound_ptr_indir4; break;
    case 8: func = TOK___bound_ptr_indir8; break;
    case 12: func = TOK___bound_ptr_indir12; break;
    case 16: func = TOK___bound_ptr_indir16; break;
        tcc_error("unhandled size when dereferencing bounded pointer");

    sym = external_global_sym(func, &func_old_type, 0);
    put_extern_sym(sym, NULL, 0, 0);

    /* patch relocation */
    /* XXX: find a better solution ? */
    rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
    rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));

static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
    return arg_regs[idx];
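/* Illustrative note (added): on this Win64 path the first two integer
   arguments are staged in %r10/%r11 instead of their final homes %rcx/%rdx,
   because gv() calls made while evaluating the remaining arguments may use
   %rcx/%rdx as temporaries; they are moved into place only just before the
   call (see the "Copy R10 and R11 into RCX and RDX" step further down). */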
static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
    orex(1,0,r & 0x100 ? 0 : r, b);
    o(0x2444 | (REG_VALUE(r) << 3));
    o(0x2484 | (REG_VALUE(r) << 3));
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    } else if (size > 4) {
    } else if (size > 2) {
    } else if (size > 1) {

static int is_sse_float(int t) {
    return bt == VT_DOUBLE || bt == VT_FLOAT;

int gfunc_arg_size(CType *type) {
    if (type->t & (VT_ARRAY|VT_BITFIELD))
    return type_size(type, &align);

void gfunc_call(int nb_args)
    int size, r, args_size, i, d, bt, struct_size;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            gen_offs_sp(0x8d, r, struct_size);

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
        } else if (bt == VT_LDOUBLE) {
            gen_offs_sp(0xdb, 0x107, struct_size);

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (vtop->type.t & VT_BTYPE);
        size = gfunc_arg_size(&vtop->type);
            /* align to stack align size */
            size = (size + 15) & ~15;
            gen_offs_sp(0x8d, d, struct_size);
            gen_offs_sp(0x89, d, arg*8);
            d = arg_prepare_reg(arg);
            gen_offs_sp(0x8d, d, struct_size);
        if (is_sse_float(vtop->type.t)) {
            if (tcc_state->nosse)
                tcc_error("SSE disabled");
            gv(RC_XMM0); /* only use one float register */
                /* movq %xmm0, j*8(%rsp) */
                gen_offs_sp(0xd60f66, 0x100, arg*8);
                /* movaps %xmm0, %xmmN */
                o(0xc0 + (arg << 3));
                d = arg_prepare_reg(arg);
                /* mov %xmm0, %rxx */
                o(0xc0 + REG_VALUE(d));
            if (bt == VT_STRUCT) {
                vtop->type.ref = NULL;
                vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                    : size > 1 ? VT_SHORT : VT_BYTE;
                gen_offs_sp(0x89, r, arg*8);
                d = arg_prepare_reg(arg);
                orex(1,d,r,0x89); /* mov */
                o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
    /* Copy R10 and R11 into RCX and RDX, respectively */
        o(0xd1894c); /* mov %r10, %rcx */
        o(0xda894c); /* mov %r11, %rdx */

    /* other compilers don't clear the upper bits when returning char/short */
    bt = vtop->type.ref->type.t & (VT_BTYPE | VT_UNSIGNED);
    if (bt == (VT_BYTE | VT_UNSIGNED))
        o(0xc0b60f); /* movzbl %al, %eax */
    else if (bt == VT_BYTE)
        o(0xc0be0f); /* movsbl %al, %eax */
    else if (bt == VT_SHORT)
    else if (bt == (VT_SHORT | VT_UNSIGNED))
        o(0xc0b70f); /* movzwl %ax, %eax */
#if 0 /* handled in gen_cast() */
    else if (bt == VT_INT)
        o(0x9848); /* cltq */
    else if (bt == (VT_INT | VT_UNSIGNED))
        o(0xc089); /* mov %eax,%eax */
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int addr, reg_param_index, bt, size;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

/* generate function epilog */
void gfunc_epilog(void)
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
    oad(0xb8, v); /* mov stacksize, %eax */
    oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
    greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
    o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;

static void gadd_sp(int val)
    if (val == (char)val) {
        oad(0xc48148, val); /* add $xxx, %rsp */

typedef enum X86_64_Mode {
    x86_64_mode_integer,

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
    else if (a == x86_64_mode_none)
    else if (b == x86_64_mode_none)
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    return x86_64_mode_sse;
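/* Illustrative note (added): per the merge order above, memory dominates
   everything, integer dominates sse and x87, and an x87 member mixed with
   sse forces memory; only when every member classifies as sse does the
   aggregate stay sse.  So struct { double a, b; } classifies as sse, while
   struct { int a; double b; } merges integer with sse and ends up integer. */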
static X86_64_Mode classify_x86_64_inner(CType *ty)
    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;
    case VT_ENUM: return x86_64_mode_integer;
    case VT_DOUBLE: return x86_64_mode_sse;
    case VT_LDOUBLE: return x86_64_mode_x87;

        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        mode = x86_64_mode_integer;

    size = type_size(ty, &align);
    *psize = (size + 7) & ~7;
    *palign = (align + 7) & ~7;
        mode = x86_64_mode_memory;
        mode = classify_x86_64_inner(ty);
    case x86_64_mode_integer:
        ret_t = (size > 4) ? VT_LLONG : VT_INT;
    case x86_64_mode_x87:
    case x86_64_mode_sse:
        ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
    default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */

ST_FUNC int classify_x86_64_va_arg(CType *ty)
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
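/* Illustrative note (added): the __builtin_va_arg code expanded from tcc's
   stdarg.h is expected to dispatch on this value, so e.g. an int or pointer
   argument is fetched via the general-register save area (__va_gen_reg), a
   double via the SSE save area (__va_float_reg), and a long double or a
   memory-class struct directly from the stack (__va_stack). */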
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);

static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
    return arg_regs[idx];
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
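    /* Illustrative note (added): concretely, an argument that needs 16-byte
       alignment (e.g. a long double, see LDOUBLE_ALIGN above) terminates a
       run, while plain 8-byte arguments accumulate inside it; the "adjust
       stack to align SSE boundary" padding below keeps each run's stack
       pushes a multiple of 16, so the aligned argument at the run boundary
       lands on a 16-byte boundary as seen by the callee. */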
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            case x86_64_mode_memory:
            case x86_64_mode_x87:
                stack_adjust += size;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;

            default: break; /* nothing to be done for x86_64_mode_none */

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)

            stack_adjust = 16 - stack_adjust;
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;

        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];

            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            switch (vtop->type.t & VT_BTYPE) {
                if (mode == x86_64_mode_sse) {
                    sse_reg -= reg_count;
                } else if (mode == x86_64_mode_integer) {
                    gen_reg -= reg_count;

                    /* allocate the necessary size on stack */
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);

                assert(mode == x86_64_mode_sse);
                o(0x50); /* push $rax */
                /* movq %xmmN, (%rsp) */
                o(0x04 + REG_VALUE(r)*8);

                assert(mode == x86_64_mode_integer);
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
            /* And swap the argument back to its original position. */
            assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));

        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */

                assert(mode == x86_64_mode_memory);
                /* allocate the necessary size on stack */
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);

    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */
    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0xc1 + ((sse_reg+1) << 3));
                assert(reg_count == 1);
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
        } else if (mode == x86_64_mode_integer) {
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));

    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */

    if (vtop->type.ref->c != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;

        /* movl $0x????????, -0x10(%rbp) */
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        gen_le32(seen_stack_size);
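        /* Illustrative note (added): these three 32-bit stores appear to seed
           the variadic bookkeeping slots at -0x10, -0xc and -0x8(%rbp): the
           bytes already consumed in the integer-register save area
           (seen_reg_num * 8), the offset into the SSE save area
           (48 + seen_sse_num * 16), and the offset of the first stack-passed
           argument (seen_stack_size).  tcc's stdarg.h is expected to read
           exactly these fields, per the "must be synced with stdarg.h" note
           earlier in this file. */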
        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */

        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;

        default: break; /* nothing to be done for x86_64_mode_none */
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);

#ifdef CONFIG_TCC_BCHECK
    /* leave some room for bound checking code */
    if (tcc_state->do_bounds_check) {
        func_bound_offset = lbounds_section->data_offset;
        func_bound_ind = ind;
        oad(0xb8, 0); /* lbound section pointer */
        o(0xc78948); /* mov %rax,%rdi ## first arg in %rdi, this must be ptr */
        oad(0xb8, 0); /* call to function */
/* generate function epilog */
void gfunc_epilog(void)
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check
        && func_bound_offset != lbounds_section->data_offset)

        /* add end of table info */
        bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));

        /* generate bound local allocation */
        sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                               func_bound_offset, lbounds_section->data_offset);
        ind = func_bound_ind;
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        gen_static_call(TOK___bound_local_new);

        /* generate bound check local freeing */
        o(0x5250); /* save returned value, if any */
        greloc(cur_text_section, sym_data, ind + 1, R_386_32);
        oad(0xb8, 0); /* mov xxx, %rax */
        o(0xc78948); /* mov %rax,%rdi # first arg in %rdi, this must be ptr */
        gen_static_call(TOK___bound_local_delete);
        o(0x585a); /* restore returned value, if any */

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */

/* generate a jump to a label */
    return gjmp2(0xe9, t);

/* generate a jump to a fixed address */
void gjmp_addr(int a)
        oad(0xe9, a - ind - 5);

ST_FUNC void gtst_addr(int inv, int a)
    int v = vtop->r & VT_VALMASK;
        inv ^= (vtop--)->c.i;
        oad(inv - 16, a - 4);
    } else if ((v & ~1) == VT_JMP) {
        if ((v & 1) != inv) {

/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
    int v = vtop->r & VT_VALMASK;

    if (nocode_wanted) {
    } else if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (inv == (vtop->c.i == TOK_NE))
                o(0x067a); /* jp +6 */
                t = gjmp2(0x8a, t); /* jp t */
        t = gjmp2((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            uint32_t n1, n = vtop->c.i;
            while ((n1 = read32le(cur_text_section->data + n)))
            write32le(cur_text_section->data + n, t);

/* generate an integer binary operation */
void gen_opi(int op)
    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    case TOK_ADDC1: /* add with carry generation */
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
            /* XXX: generate inc and dec for smaller code ? */
            orex(ll, r, 0, 0x83);
            o(0xc0 | (opc << 3) | REG_VALUE(r));
            orex(ll, r, 0, 0x81);
            oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            gv2(RC_INT, RC_INT);
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        if (op >= TOK_ULT && op <= TOK_GT) {
    case TOK_SUBC1: /* sub with carry generation */
    case TOK_ADDC2: /* add with carry use */
    case TOK_SUBC2: /* sub with carry use */
        gv2(RC_INT, RC_INT);
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        opc = 0xc0 | (opc << 3);
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
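        /* Illustrative note (added): for signed division 0x99 with REX.W is
           cqto (sign-extend %rax into %rdx), while for unsigned division
           0xd231 is xor %edx,%edx; the 0xf0 vs 0xf8 ModRM byte just above
           selects the /6 (div) or /7 (idiv) form of opcode 0xf7. */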
        if (op == '%' || op == TOK_UMOD)

void gen_opl(int op)
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
    int a, ft, fc, swapped, r;
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {

    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {

    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
            else if (op == TOK_EQ || op == TOK_NE)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                o(0x45c4f6); /* test $0x45, %ah */
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);

            o(0xde); /* fxxxp %st, %st(1) */

        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;

            if (op == TOK_EQ || op == TOK_NE) {
                if (op == TOK_LE || op == TOK_LT)
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                    op = 0x97; /* seta */

            assert(!(vtop[-1].r & VT_LVAL));
            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);

            vtop->c.i = op | 0x100;
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;

            assert(!(vtop[-1].r & VT_LVAL));
            assert(vtop->r & VT_LVAL);

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */

        int r = get_reg(RC_FLOAT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
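        /* Illustrative note (added): the 0xf2/0xf3 prefix chosen above selects
           the double (cvtsi2sd, f2 0f 2a) vs float (cvtsi2ss, f3 0f 2a) form
           of the conversion, and a REX.W prefix is needed when the integer
           source is 64 bits wide. */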
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */

        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
    int ft, bt, size, r;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
    } else if (bt == VT_DOUBLE) {
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);

/* computed goto support */

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp) */
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */

    r = gv(RC_INT); /* allocation size */
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/