/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_ASM_REGS 16
#define CONFIG_TCC_ASM
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which makes
   assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_ST0     0x0080 /* only for long double */
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_IRE2    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_FRE2    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
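/* REX_BASE yields the extension bit that goes into a REX prefix for a
   register number, REG_VALUE the low 3 bits that go into a ModRM/SIB
   field.  For instance register number 10 (%r10) gives REX_BASE = 1 and
   REG_VALUE = 2, i.e. it is encoded as field value 2 plus an extension
   bit in the REX byte. */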
/* return registers for function */
#define REG_IRET TREG_RAX  /* single word int return register */
#define REG_IRE2 TREG_RDX  /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_FRE2 TREG_XMM1 /* second float return register */
/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
/* define if return values need to be extended explicitly
   at caller side (for interfacing with non-TCC compilers) */
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#define USING_GLOBALS

ST_DATA const char * const target_machine_defs =
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
static unsigned long func_sub_sp_offset;
static int func_ret_sub;

#if defined(CONFIG_TCC_BCHECK)
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
ST_DATA int func_bound_add_epilog;

static int func_scratch, func_alloca;
/* XXX: make it faster ? */
ST_FUNC void g(int c)
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
ST_FUNC void o(unsigned int c)

ST_FUNC void gen_le16(int v)

ST_FUNC void gen_le32(int c)

ST_FUNC void gen_le64(int64_t c)
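/* orex() below emits an optional REX prefix, built as
   0x40 | REX.W<<3 | REX.R<<2 | REX.B, followed by the opcode 'b':
   'll' sets REX.W (64-bit operand size), 'r' contributes REX.B (the
   extension of the ModRM r/m or opcode register field) and 'r2'
   contributes REX.R (the extension of the ModRM reg field); REX.X is
   never needed here since no index register is used.
   For instance the pattern used throughout this file
       orex(1, d, r, 0x89);
       o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
   with d = TREG_R8 and r = TREG_RDX emits 49 89 d0, i.e. mov %rdx,%r8. */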
static void orex(int ll, int r, int r2, int b)
    if ((r & VT_VALMASK) >= VT_CONST)
    if ((r2 & VT_VALMASK) >= VT_CONST)
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
    unsigned char *ptr = cur_text_section->data + t;
    uint32_t n = read32le(ptr); /* next value */
    write32le(ptr, a < 0 ? -a : a - t - 4);
static int is64_type(int t)
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
    greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
    greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
    greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
        tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
                  get_tok_str(sym->v, NULL), c, r,
                  cur_text_section->data[ind-3],
                  cur_text_section->data[ind-2],
                  cur_text_section->data[ind-1]
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);

    /* we use add c, %xxx for displacement */
    o(0xc0 + REG_VALUE(r));
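/* The gen_modrm* helpers below emit the ModRM byte (plus SIB byte and
   displacement where needed).  A ModRM byte is mod<<6 | reg<<3 | rm:
   mod 00 means no displacement, 01 an 8-bit displacement, 10 a 32-bit
   displacement and 11 a register operand.  'op_reg' is shifted into the
   reg field and the base register goes into rm; rm = 100 selects a SIB
   byte (used with 0x25 below for absolute 32-bit addresses) and mod 00
   with rm = 101 selects RIP-relative addressing. */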
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
                gen_gotpcrel(r, sym, c);
                gen_addrpc32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
            /* short reference */
            oad(0x85 | op_reg, c);
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
            g(0x80 | op_reg | REG_VALUE(r));
            g(0x00 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
    gen_modrm_impl(op_reg, r, sym, c, 0);

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
    int v, t, ft, fc, fr;

    sv = pe_getimport(sv, &v2);

    ft = sv->type.t & ~VT_DEFSIGN;
    if (fc != sv->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);
        /* load from the temporary register */
        if (v == VT_LLOCAL) {
                v1.r = VT_LOCAL | VT_LVAL;
                if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                    fr = get_reg(RC_INT);
                /* If the addend doesn't fit into a 32bit signed
                   we must use a 64bit move.  We've checked above
                   that this doesn't have a sym associated. */
                v1.type.t = VT_LLONG;
                if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                    fr = get_reg(RC_INT);
        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                    tcc_error("invalid aggregate type for register load");
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else if ((ft & VT_TYPE) == (VT_VOID)) {
            /* Can happen with zero size structs */
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
            gen_modrm64(b, r, fr, sv->sym, fc);
            gen_modrm(r, fr, sv->sym, fc);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
            if (sv->sym->type.t & VT_STATIC) {
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
                o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                gen_gotpcrel(r, sv->sym, fc);
        } else if (is64_type(ft)) {
            orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
            orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
    } else if (v == VT_LOCAL) {
        orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
        gen_modrm(r, VT_LOCAL, sv->sym, fc);
    } else if (v == VT_CMP) {
            /* This was a float compare.  If the parity bit is
               set the result was unordered, meaning false for everything
               except TOK_NE, and true for TOK_NE. */
            orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
            g(v ^ fc ^ (v == TOK_NE));
            o(0x037a + (REX_BASE(r) << 8));
        orex(0,r,0, 0x0f); /* setxx %br */
        o(0xc0 + REG_VALUE(r));
        o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
    } else if (v == VT_JMP || v == VT_JMPI) {
        oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
        o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
        oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                /* gen_cvt_ftof(VT_DOUBLE); */
                o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                /* movsd -0x10(%rsp),%xmmN */
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                if ((ft & VT_BTYPE) == VT_FLOAT) {
                    assert((ft & VT_BTYPE) == VT_DOUBLE);
                o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
        } else if (r == TREG_ST0) {
            assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
            /* gen_cvt_ftof(VT_LDOUBLE); */
            /* movsd %xmmN,-0x10(%rsp) */
            o(0x44 + REG_VALUE(r)*8); /* %xmmN */
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            orex(is64_type(ft), r, v, 0x89);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
    /* store the REX prefix in this variable when PIC is enabled */

    v = pe_getimport(v, &v2);

    fr = v->r & VT_VALMASK;
    if (fc != v->c.i && (fr & VT_SYM))
        tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
        && !(v->sym->type.t & VT_STATIC)) {
        /* mov xx(%rip), %r11 */
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x7e0f); /* movd */
    } else if (bt == VT_DOUBLE) {
        o(0xd60f); /* movq */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        if (bt == VT_BYTE || bt == VT_BOOL)
        else if (is64_type(bt))

    /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            orex(1, fr, r, op64);
            o(0xc0 + fr + r * 8); /* mov r, fr */
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            o(0xc0 + fr + r * 8); /* mov r, fr */
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
        /* otherwise, indirect call */
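        /* FF /2 is indirect call and FF /4 indirect jmp: the ModRM reg
           field selects the operation, so adding is_jmp << 4 below turns
           the 0xd0 (call) form into the 0xe0 (jmp) form. */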
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
#if defined(CONFIG_TCC_BCHECK)

static void gen_bounds_call(int v)
    Sym *sym = external_helper_sym(v);

        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);

# define TREG_FASTCALL_1 TREG_RCX
# define TREG_FASTCALL_1 TREG_RDI

static void gen_bounds_prolog(void)
    /* leave some room for bound checking code */
    func_bound_offset = lbounds_section->data_offset;
    func_bound_ind = ind;
    func_bound_add_epilog = 0;
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lbound section pointer */
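    /* The constant above is the little-endian encoding of "48 8d 0d",
       i.e. lea disp32(%rip),%rcx.  When the bounds helper expects its
       argument in %rdi instead, adding 0x300000 changes the ModRM byte
       from 0x0d to 0x3d, which switches the destination to %rdi. */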
    oad(0xb8, 0); /* call to function */

static void gen_bounds_epilog(void)
    int offset_modified = func_bound_offset != lbounds_section->data_offset;

    if (!offset_modified && !func_bound_add_epilog)

    /* add end of table info */
    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));

    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                           func_bound_offset, lbounds_section->data_offset);

    /* generate bound local allocation */
    if (offset_modified) {
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
        gen_bounds_call(TOK___bound_local_new);

    /* generate bound check local freeing */
    o(0x5250); /* save returned value, if any */
    greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lea xxx(%rip), %rcx/rdi */
    gen_bounds_call(TOK___bound_local_delete);
    o(0x585a); /* restore returned value, if any */
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
    return idx >= 0 && idx < REGN ? arg_regs[idx] : 0;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
static void gen_offs_sp(int b, int r, int d)
    orex(1,0,r & 0x100 ? 0 : r, b);
        o(0x2444 | (REG_VALUE(r) << 3));
        o(0x2484 | (REG_VALUE(r) << 3));

static int using_regs(int size)
    return !(size > 8 || (size & (size - 1)));
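/* The expression above is true exactly for sizes 1, 2, 4 and 8: a power
   of two not larger than 8, i.e. a value that fits in a single register
   under the Win64 calling convention; anything larger or oddly sized is
   passed by reference. */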
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    if (!using_regs(size))
static int is_sse_float(int t) {
    return bt == VT_DOUBLE || bt == VT_FLOAT;

static int gfunc_arg_size(CType *type) {
    if (type->t & (VT_ARRAY|VT_BITFIELD))
    return type_size(type, &align);
void gfunc_call(int nb_args)
    int size, r, args_size, i, d, bt, struct_size;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
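    /* Win64 requires the caller to reserve at least REGN (4) stack
       slots: the 32 bytes of "home space" the callee may use to spill
       its register arguments, plus one slot for each additional stack
       argument. */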
    /* for struct arguments we need to call memcpy, and that call would
       clobber the register arguments we are preparing.  So we first
       process the arguments that will be passed on the stack. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            gen_offs_sp(0x8d, r, struct_size);
            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
        } else if (bt == VT_LDOUBLE) {
            gen_offs_sp(0xdb, 0x107, struct_size);
        if (func_scratch < struct_size)
            func_scratch = struct_size;

    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);

            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                    /* Load directly to xmmN register */
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0xc0 + arg*8 + REG_VALUE(d));
            if (bt == VT_STRUCT) {
                vtop->type.ref = NULL;
                vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                             : size > 1 ? VT_SHORT : VT_BYTE;
                gen_offs_sp(0x89, r, arg*8);
                d = arg_prepare_reg(arg);
                orex(1,d,r,0x89); /* mov */
                o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));

    /* Copy R10 and R11 into RCX and RDX, respectively */
        o(0xd1894c); /* mov %r10, %rcx */
        o(0xda894c); /* mov %r11, %rdx */

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            gen_bounds_call(TOK___bound_alloca_nr); /* new region */
#define FUNC_PROLOG_SIZE 11
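/* The prolog written back at the end of gfunc_epilog() is always 11
   bytes: push %rbp (1 byte), mov %rsp,%rbp (3 bytes) and sub
   $imm32,%rsp (7 bytes).  gfunc_prolog() therefore just skips
   FUNC_PROLOG_SIZE bytes and the epilog patches them in once the final
   frame size is known. */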
/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
    CType *func_type = &func_sym->type;
    int addr, reg_param_index, bt, size;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | VT_LVAL, addr);
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | VT_LVAL, addr);

    while (reg_param_index < REGN) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
/* generate function epilog */
void gfunc_epilog(void)
    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    loc = (loc & -16) - func_scratch;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
        Sym *sym = external_helper_sym(TOK___chkstk);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */

    /* add the "func_scratch" area after each alloca seen */
    gsym_addr(func_alloca, -func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
static void gadd_sp(int val)
    if (val == (char)val) {
        oad(0xc48148, val); /* add $xxx, %rsp */
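/* Argument passing in the System V x86-64 code below follows a
   simplified version of the ABI's classification scheme: each argument
   (or eightbyte of a struct) is classified as INTEGER, SSE, X87 or
   MEMORY, and struct members are merged pairwise with
   classify_x86_64_merge(): MEMORY wins over everything, INTEGER wins
   over SSE, and an x87 (long double) member forces the aggregate into
   memory here. */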
typedef enum X86_64_Mode {
    x86_64_mode_integer,

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
    else if (a == x86_64_mode_none)
    else if (b == x86_64_mode_none)
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    return x86_64_mode_sse;
static X86_64_Mode classify_x86_64_inner(CType *ty)
    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;
        return x86_64_mode_integer;
    case VT_DOUBLE: return x86_64_mode_sse;
    case VT_LDOUBLE: return x86_64_mode_x87;
        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        mode = x86_64_mode_integer;

    size = type_size(ty, &align);
    *psize = (size + 7) & ~7;
    *palign = (align + 7) & ~7;
        mode = x86_64_mode_memory;
        mode = classify_x86_64_inner(ty);

        case x86_64_mode_integer:
            if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
                ret_t |= VT_UNSIGNED;
        case x86_64_mode_x87:
        case x86_64_mode_sse:
                ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
        default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
ST_FUNC int classify_x86_64_va_arg(CType *ty)
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
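/* When the classification is x86_64_mode_memory the struct is returned
   through a hidden pointer: the caller passes the address of the result
   slot as an implicit first argument, and gfunc_prolog() below spills it
   like a normal register parameter. */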
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
    return idx >= 0 && idx < REGN ? arg_regs[idx] : 0;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
    int size, align, r, args_size, stack_adjust, i, reg_count, k;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char *onstack = tcc_malloc((nb_args + 1) * sizeof (char));

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (size == 0) continue;
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
        } else if (mode == x86_64_mode_none) {
            if (align == 16 && (stack_adjust &= 15)) {
            stack_adjust += size;

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)
    /* for struct arguments we need to call memcpy, and that call would
       clobber the register arguments we are preparing.  So we first
       process the arguments that will be passed on the stack. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;

    for (i = k = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (!onstack[i + k]) {
        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it. */
            o(0x50); /* push %rax; aka sub $8,%rsp */
        if (onstack[i + k] == 2)

        switch (vtop->type.t & VT_BTYPE) {
            /* allocate the necessary size on stack */
            oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);

            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */

            assert(mode == x86_64_mode_sse);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0x04 + REG_VALUE(r)*8);

            assert(mode == x86_64_mode_integer);
            /* XXX: implicit cast ? */
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        if (size == 0) continue;
        /* Alter stack entry type so that gv() knows how to treat it */
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm1, %xmmN */
                    o(0xc1 + ((sse_reg+1) << 3));
                    /* movaps %xmm0, %xmmN */
                    o(0xc0 + (sse_reg << 3));
                assert(reg_count == 1);
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
        } else if (mode == x86_64_mode_integer) {
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));

    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
    CType *func_type = &func_sym->type;
    X86_64_Mode mode, ret_mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    ret_mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);

        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = ret_mode == x86_64_mode_memory;

        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                seen_reg_num += reg_count;
            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                seen_sse_num += reg_count;
        /* movl $0x????????, -0x18(%rbp) */
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0x14(%rbp) */
        gen_le32(seen_sse_num * 16 + 48);
        /* leaq $0x????????, %r11 */
        gen_le32(seen_stack_size);
        /* movq %r11, -0x10(%rbp) */
        /* leaq $-192(%rbp), %r11 */
        gen_le32(-176 - 24);
        /* movq %r11, -0x8(%rbp) */

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    if (ret_mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;
        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;
        default: break; /* nothing to be done for x86_64_mode_none */
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();
/* generate function epilog */
void gfunc_epilog(void)
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
ST_FUNC void gen_fill_nops(int bytes)

/* generate a jump to a label */
    return gjmp2(0xe9, t);

/* generate a jump to a fixed address */
void gjmp_addr(int a)
        oad(0xe9, a - ind - 5);

ST_FUNC int gjmp_append(int n, int t)
    /* insert vtop->c jump list in t */
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))
ST_FUNC int gjmp_cond(int op, int t)
        /* This was a float compare.  If the parity flag is set
           the result was unordered.  For anything except != this
           means false and we don't jump (anding both conditions).
           For != this means true (oring both).
           Take care about inverting the test.  We need to jump
           to our target if the result was unordered and test wasn't NE,
           otherwise if unordered we don't want to jump. */
        int v = vtop->cmp_r;
        if (op ^ v ^ (v != TOK_NE))
            o(0x067a); /* jp +6 */
            t = gjmp2(0x8a, t); /* jp t */
    t = gjmp2(op - 16, t);
/* generate an integer binary operation */
void gen_opi(int op)
    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    case TOK_ADDC1: /* add with carry generation */
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            gv2(RC_INT, RC_INT);
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        if (op >= TOK_ULT && op <= TOK_GT)
    case TOK_SUBC1: /* sub with carry generation */
    case TOK_ADDC2: /* add with carry use */
    case TOK_SUBC2: /* sub with carry use */
        gv2(RC_INT, RC_INT);
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        opc = 0xc0 | (opc << 3);
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
void gen_opl(int op)

void vpush_const(int t, int v)
    CType ctype = { t | VT_CONSTANT, 0 };
    vpushsym(&ctype, external_global_sym(v, &ctype));
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
    int a, ft, fc, swapped, r;
    int bt = vtop->type.t & VT_BTYPE;
    int float_type = bt == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    if (op == TOK_NEG) { /* unary minus */
        if (float_type == RC_ST0) {
            o(0xe0d9); /* fchs */
            /* -0.0, in libtcc1.c */
            vpush_const(bt, bt == VT_FLOAT ? TOK___mzerosf : TOK___mzerodf);
            if (bt == VT_DOUBLE)
            /* xorp[sd] %xmm1, %xmm0 */
            o(0xc0570f | (REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8) << 16);
    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {

    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
            else if (op == TOK_EQ || op == TOK_NE)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                o(0x45c4f6); /* test $0x45, %ah */
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            o(0xde); /* fxxxp %st, %st(1) */
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;
                vtop->r = r = r | VT_LVAL;
            if (op == TOK_EQ || op == TOK_NE) {
                if (op == TOK_LE || op == TOK_LT)
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                    op = 0x97; /* seta */
            assert(!(vtop[-1].r & VT_LVAL));
            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
                o(0x2f0f); /* comisd */
            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            vset_VT_CMP(op | 0x100);
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
        assert((ft & VT_BTYPE) != VT_LDOUBLE);
        /* if saved lvalue, then we must reload it */
        if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
            r = get_reg(RC_INT);
            v1.r = VT_LOCAL | VT_LVAL;
            vtop->r = r = r | VT_LVAL;
        assert(!(vtop[-1].r & VT_LVAL));
        assert(vtop->r & VT_LVAL);

        if ((ft & VT_BTYPE) == VT_DOUBLE) {
        if (vtop->r & VT_LVAL) {
            gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        int r = get_reg(RC_FLOAT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT ? 1 : 0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
    int ft, bt, size, r;

    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
    } else if (bt == VT_DOUBLE) {
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);

// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
    /* x86_64 specific: movslq */
    o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));
/* char/short to int conversion */
ST_FUNC void gen_cvt_csti(int t)
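    /* The 0f b6 opcode (movzbl) serves as the base: adding 8 to the
       second opcode byte selects the sign-extending form (0f be/bf) when
       the source type is signed, adding 1 selects the 16-bit source
       (0f b7/bf) for shorts, and 'll' only adds a REX.W prefix. */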
    sz = !(t & VT_UNSIGNED);
    xl = (t & VT_BTYPE) == VT_SHORT;
    ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
        | (sz << 3 | xl) << 8
        | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16
2207 ST_FUNC
void gen_increment_tcov (SValue
*sv
)
2209 o(0x058348); /* addq $1, xxx(%rip) */
2210 greloca(cur_text_section
, sv
->sym
, ind
, R_X86_64_PC32
, -5);
2215 /* computed goto support */
/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp) */
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp) */
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#if defined(CONFIG_TCC_BCHECK)
    use_call = tcc_state->do_bounds_check;
#ifdef TCC_TARGET_PE  /* alloca does more than just adjust %rsp on Windows */
    vpush_helper_func(TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    r = gv(RC_INT); /* allocation size */
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/