/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef TARGET_DEFS_ONLY

/* number of available registers */
#define NB_ASM_REGS 16
#define CONFIG_TCC_ASM

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions on it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_ST0     0x0080 /* only for long double */
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_IRE2    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_FRE2    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
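/* Added illustration (not part of the original file): a minimal sketch of how
   the two macros above split an x86-64 register number into the REX extension
   bit and the 3-bit field that goes into a ModRM/SIB byte.  The register
   numbering used below (rax = 0, r10 = 10) follows the usual hardware encoding
   and is an assumption of this example only. */
#if 0
#include <assert.h>
static void rex_macro_example(void)
{
    assert(REX_BASE(0) == 0 && REG_VALUE(0) == 0);   /* %rax: no REX bit, field 0 */
    assert(REX_BASE(10) == 1 && REG_VALUE(10) == 2); /* %r10: REX bit set, field 2 */
}
#endif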
/* return registers for function */
#define REG_IRET TREG_RAX  /* single word int return register */
#define REG_IRE2 TREG_RDX  /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_FRE2 TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */

/* define if return values need to be extended explicitly
   at caller side (for interfacing with non-TCC compilers) */

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#define USING_GLOBALS

ST_DATA const char * const target_machine_defs =

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */

static unsigned long func_sub_sp_offset;
static int func_ret_sub;

#if defined(CONFIG_TCC_BCHECK)
static addr_t func_bound_offset;
static unsigned long func_bound_ind;
ST_DATA int func_bound_add_epilog;

static int func_scratch, func_alloca;

/* XXX: make it faster ? */
ST_FUNC void g(int c)
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;

ST_FUNC void o(unsigned int c)

ST_FUNC void gen_le16(int v)

ST_FUNC void gen_le32(int c)

ST_FUNC void gen_le64(int64_t c)

static void orex(int ll, int r, int r2, int b)
    if ((r & VT_VALMASK) >= VT_CONST)
    if ((r2 & VT_VALMASK) >= VT_CONST)
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
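/* Added note (illustrative, not from the original source): the byte built by
   orex() follows the REX prefix layout 0100WRXB, with W taken from 'll', R
   from 'r2' and B from 'r' (X is never set here).  Assuming the usual register
   numbering (TREG_RAX == 0, TREG_R10 == 10), the prefix for a 64-bit register
   load through %rax would be 0x48 (REX.W), and through %r10 it becomes 0x49
   (REX.W|REX.B), each followed by the opcode byte passed in 'b'.  When neither
   'll' nor any extended register is involved, no prefix byte is emitted. */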
/* output a symbol and patch all calls to it */
ST_FUNC void gsym_addr(int t, int a)
        unsigned char *ptr = cur_text_section->data + t;
        uint32_t n = read32le(ptr); /* next value */
        write32le(ptr, a < 0 ? -a : a - t - 4);
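/* Added note (illustrative, not from the original source): forward jumps that
   cannot be resolved yet keep the offset of the previous pending jump in their
   own 32-bit displacement field, so all sites waiting for the same label form a
   linked list threaded through the code buffer.  The "next value" read above
   fetches that link before the real displacement (a - t - 4, relative to the
   end of the 4-byte field) is patched in. */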
static int is64_type(int t)
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);

/* instruction + 4 bytes data. Return the address of the data */
static int oad(int c, int s)

/* generate jmp to a label */
#define gjmp2(instr,lbl) oad(instr,lbl)

ST_FUNC void gen_addr32(int r, Sym *sym, int c)
        greloca(cur_text_section, sym, ind, R_X86_64_32S, c), c=0;

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
        greloca(cur_text_section, sym, ind, R_X86_64_64, c), c=0;

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
        greloca(cur_text_section, sym, ind, R_X86_64_PC32, c-4), c=4;

/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
    tcc_error("internal error: no GOT on PE: %s %x %x | %02x %02x %02x\n",
              get_tok_str(sym->v, NULL), c, r,
              cur_text_section->data[ind-3],
              cur_text_section->data[ind-2],
              cur_text_section->data[ind-1]
    greloca(cur_text_section, sym, ind, R_X86_64_GOTPCREL, -4);

    /* we use add c, %xxx for displacement */
        o(0xc0 + REG_VALUE(r));

static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
            /* Absolute memory reference */
            o(0x04 | op_reg); /* [sib] | destreg */
            oad(0x25, c);     /* disp32 */
            o(0x05 | op_reg); /* (%rip)+disp32 | destreg */
                gen_gotpcrel(r, sym, c);
                gen_addrpc32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
            /* short reference */
            oad(0x85 | op_reg, c);
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
            g(0x80 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
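/* Added note (illustrative, not from the original source): the bytes composed
   above follow the ModRM layout mod(2) | reg(3) | r/m(3), with 'op_reg' already
   shifted into the reg field.  The visible cases, roughly:
     0x05 | op_reg, disp32          mod=00, r/m=101: RIP-relative reference
     0x04 | op_reg, 0x25, disp32    mod=00, r/m=100 (SIB), base=disp32: absolute
     0x85 | op_reg, disp32          mod=10, r/m=101: disp32(%rbp), the VT_LOCAL case
     0x80 | op_reg | reg, disp32    mod=10: disp32(reg), used for TREG_MEM
     0x00 | op_reg | reg            mod=00: (reg) with no displacement */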
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
    gen_modrm_impl(op_reg, r, sym, c, 0);

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
    int v, t, ft, fc, fr;

    sv = pe_getimport(sv, &v2);

    ft = sv->type.t & ~VT_DEFSIGN;
    if (fc != sv->c.i && (fr & VT_SYM))
      tcc_error("64 bit addend in load");

    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);
        /* load from the temporary register */
        if (v == VT_LLOCAL) {
            v1.r = VT_LOCAL | VT_LVAL;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);
            /* If the addend doesn't fit into a 32-bit signed value
               we must use a 64-bit move.  We've checked above
               that this doesn't have a sym associated. */
            v1.type.t = VT_LLONG;
            if (!(reg_classes[fr] & (RC_INT|RC_R11)))
                fr = get_reg(RC_INT);

        /* Like GCC we can load from small enough properly sized
           structs and unions as well.
           XXX maybe move to generic operand handling, but should
           occur only with asm, so tccasm.c might also be a better place */
        if ((ft & VT_BTYPE) == VT_STRUCT) {
            switch (type_size(&sv->type, &align)) {
                case 1: ft = VT_BYTE; break;
                case 2: ft = VT_SHORT; break;
                case 4: ft = VT_INT; break;
                case 8: ft = VT_LLONG; break;
                    tcc_error("invalid aggregate type for register load");

        if ((ft & VT_BTYPE) == VT_FLOAT) {
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else if ((ft & VT_TYPE) == (VT_VOID)) {
            /* Can happen with zero size structs */
            assert(((ft & VT_BTYPE) == VT_INT)
                   || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR)
                   || ((ft & VT_BTYPE) == VT_FUNC)
            gen_modrm64(b, r, fr, sv->sym, fc);
            gen_modrm(r, fr, sv->sym, fc);
            o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
            gen_addrpc32(fr, sv->sym, fc);
            if (sv->sym->type.t & VT_STATIC) {
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
                o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                gen_gotpcrel(r, sv->sym, fc);
        } else if (is64_type(ft)) {
            orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
            orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            /* This was a float compare.  If the parity bit is
               set the result was unordered, meaning false for everything
               except TOK_NE, and true for TOK_NE. */
            orex(0, r, 0, 0xb0 + REG_VALUE(r)); /* mov $0/1,%al */
            g(v ^ fc ^ (v == TOK_NE));
            o(0x037a + (REX_BASE(r) << 8));
            orex(0,r,0, 0x0f); /* setxx %br */
            o(0xc0 + REG_VALUE(r));
            o(0xc0b6 + REG_VALUE(r) * 0x900); /* movzbl %al, %eax */
        } else if (v == VT_JMP || v == VT_JMPI) {
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                /* gen_cvt_ftof(VT_DOUBLE); */
                o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                /* movsd -0x10(%rsp),%xmmN */
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                if ((ft & VT_BTYPE) == VT_FLOAT) {
                    assert((ft & VT_BTYPE) == VT_DOUBLE);
                o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
        } else if (r == TREG_ST0) {
            assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
            /* gen_cvt_ftof(VT_LDOUBLE); */
            /* movsd %xmmN,-0x10(%rsp) */
            o(0x44 + REG_VALUE(r)*8); /* %xmmN */
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            orex(is64_type(ft), r, v, 0x89);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
    /* store the REX prefix in this variable when PIC is enabled */

    v = pe_getimport(v, &v2);

    fr = v->r & VT_VALMASK;
    if (fc != v->c.i && (fr & VT_SYM))
      tcc_error("64 bit addend in store");
    ft &= ~(VT_VOLATILE | VT_CONSTANT);

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
        && !(v->sym->type.t & VT_STATIC)) {
        /* mov xx(%rip), %r11 */
        gen_gotpcrel(TREG_R11, v->sym, v->c.i);
        pic = is64_type(bt) ? 0x49 : 0x41;

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x7e0f); /* movd */
    } else if (bt == VT_DOUBLE) {
        o(0xd60f); /* movq */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        if (bt == VT_BYTE || bt == VT_BOOL)
        else if (is64_type(bt))

    /* xxx r, (%r11) where xxx is mov, movq, fld, etc. */
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            orex(1, fr, r, op64);
            o(0xc0 + fr + r * 8); /* mov r, fr */
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            o(0xc0 + fr + r * 8); /* mov r, fr */

/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
        ((vtop->r & VT_SYM) && (vtop->c.i-4) == (int)(vtop->c.i-4))) {
        /* constant symbolic case -> simple relocation */
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PC32, (int)(vtop->c.i-4));
            greloca(cur_text_section, vtop->sym, ind + 1, R_X86_64_PLT32, (int)(vtop->c.i-4));
        oad(0xe8 + is_jmp, 0); /* call/jmp im */
        /* otherwise, indirect call */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));

#if defined(CONFIG_TCC_BCHECK)

static void gen_bounds_call(int v)
    Sym *sym = external_helper_sym(v);
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        greloca(cur_text_section, sym, ind-4, R_X86_64_PLT32, -4);

#  define TREG_FASTCALL_1 TREG_RCX
#  define TREG_FASTCALL_1 TREG_RDI

static void gen_bounds_prolog(void)
    /* leave some room for bound checking code */
    func_bound_offset = lbounds_section->data_offset;
    func_bound_ind = ind;
    func_bound_add_epilog = 0;
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lbound section pointer */
    oad(0xb8, 0); /* call to function */

static void gen_bounds_epilog(void)
    int offset_modified = func_bound_offset != lbounds_section->data_offset;

    if (!offset_modified && !func_bound_add_epilog)

    /* add end of table info */
    bounds_ptr = section_ptr_add(lbounds_section, sizeof(addr_t));

    sym_data = get_sym_ref(&char_pointer_type, lbounds_section,
                           func_bound_offset, lbounds_section->data_offset);

    /* generate bound local allocation */
    if (offset_modified) {
        ind = func_bound_ind;
        greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
        gen_bounds_call(TOK___bound_local_new);

    /* generate bound check local freeing */
    o(0x5250); /* save returned value, if any */
    o(0x20ec8348); /* sub $32,%rsp */
    o(0x290f); /* movaps %xmm0,0x10(%rsp) */
    o(0x240c290f); /* movaps %xmm1,(%rsp) */
    greloca(cur_text_section, sym_data, ind + 3, R_X86_64_PC32, -4);
    o(0x0d8d48 + ((TREG_FASTCALL_1 == TREG_RDI) * 0x300000)); /* lea xxx(%rip), %rcx/rdi */
    gen_bounds_call(TOK___bound_local_delete);
    o(0x280f); /* movaps 0x10(%rsp),%xmm0 */
    o(0x240c280f); /* movaps (%rsp),%xmm1 */
    o(0x20c48348); /* add $32,%rsp */
    o(0x585a); /* restore returned value, if any */
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
    return idx >= 0 && idx < REGN ? arg_regs[idx] : 0;
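/* Added note (illustrative, not from the original source): this is the Windows
   x64 calling-convention path.  The first four integer/pointer arguments travel
   in RCX, RDX, R8 and R9 (floats in XMM0-XMM3), and the caller always reserves
   at least 32 bytes of "shadow space" for them on the stack, which is why
   gfunc_call() below never allocates less than REGN * PTR_SIZE of argument
   area. */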
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
static void gen_offs_sp(int b, int r, int d)
    orex(1,0,r & 0x100 ? 0 : r, b);
        o(0x2444 | (REG_VALUE(r) << 3));
        o(0x2484 | (REG_VALUE(r) << 3));

static int using_regs(int size)
    return !(size > 8 || (size & (size - 1)));

/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    if (!using_regs(size))

static int is_sse_float(int t) {
    return bt == VT_DOUBLE || bt == VT_FLOAT;

static int gfunc_arg_size(CType *type) {
    if (type->t & (VT_ARRAY|VT_BITFIELD))
    return type_size(type, &align);

void gfunc_call(int nb_args)
    int size, r, args_size, i, d, bt, struct_size;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (using_regs(size))
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            gen_offs_sp(0x8d, r, struct_size);
            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
        } else if (bt == VT_LDOUBLE) {
            gen_offs_sp(0xdb, 0x107, struct_size);

        if (func_scratch < struct_size)
            func_scratch = struct_size;

    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (!using_regs(size)) {
            /* align to stack align size */
            size = (size + 15) & ~15;
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            if (is_sse_float(vtop->type.t)) {
                if (tcc_state->nosse)
                    tcc_error("SSE disabled");
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                    /* Load directly to xmmN register */
                    d = arg_prepare_reg(arg);
                    /* mov %xmmN, %rxx */
                    o(0xc0 + arg*8 + REG_VALUE(d));
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                    gen_offs_sp(0x89, r, arg*8);
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));

    /* Copy R10 and R11 into RCX and RDX, respectively */
        o(0xd1894c); /* mov %r10, %rcx */
        o(0xda894c); /* mov %r11, %rdx */

    if ((vtop->r & VT_SYM) && vtop->sym->v == TOK_alloca) {
        /* need to add the "func_scratch" area after alloca */
        o(0x48); func_alloca = oad(0x05, func_alloca); /* add $NN, %rax */
#ifdef CONFIG_TCC_BCHECK
        if (tcc_state->do_bounds_check)
            gen_bounds_call(TOK___bound_alloca_nr); /* new region */
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
    CType *func_type = &func_sym->type;
    int addr, reg_param_index, bt, size;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    size = gfunc_arg_size(&func_vt);
    if (!using_regs(size)) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (!using_regs(size)) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LLOCAL | VT_LVAL, addr);
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    if (tcc_state->nosse)
                        tcc_error("SSE disabled");
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type,
                     VT_LOCAL | VT_LVAL, addr);

    while (reg_param_index < REGN) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();

/* generate function epilog */
void gfunc_epilog(void)
    /* align local size to word & save local variables */
    func_scratch = (func_scratch + 15) & -16;
    loc = (loc & -16) - func_scratch;

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
        Sym *sym = external_helper_sym(TOK___chkstk);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, 0); /* call __chkstk, (does the stackframe too) */
        greloca(cur_text_section, sym, ind-4, R_X86_64_PC32, -4);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */

    /* add the "func_scratch" area after each alloca seen */
    gsym_addr(func_alloca, -func_scratch);

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;

static void gadd_sp(int val)
    if (val == (char)val) {
        oad(0xc48148, val); /* add $xxx, %rsp */

typedef enum X86_64_Mode {
    x86_64_mode_integer,

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
    else if (a == x86_64_mode_none)
    else if (b == x86_64_mode_none)
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
        return x86_64_mode_sse;
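/* Added example (illustrative, not from the original source): with the merge
   rules above, a whole aggregate collapses to a single class, e.g.
     struct { int a; float b; }  ->  merge(integer, sse)  -> integer (GP register)
     struct { float a, b; }      ->  merge(sse, sse)      -> sse     (XMM register)
     a long double field         ->  x87, which degrades to memory when merged
                                     with any other non-empty class
   This is a simplification of the SysV ABI, which classifies each eightbyte of
   a struct separately; here only the number of registers of the single chosen
   class is tracked, via 'reg_count' in classify_x86_64_arg() below. */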
static X86_64_Mode classify_x86_64_inner(CType *ty)
    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;
        return x86_64_mode_integer;
    case VT_DOUBLE: return x86_64_mode_sse;
    case VT_LDOUBLE: return x86_64_mode_x87;
        mode = x86_64_mode_none;
        for (f = f->next; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        mode = x86_64_mode_integer;

        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;
            mode = x86_64_mode_memory;
            mode = classify_x86_64_inner(ty);

        case x86_64_mode_integer:
                if ((ty->t & VT_BTYPE) == VT_STRUCT || (ty->t & VT_UNSIGNED))
                    ret_t |= VT_UNSIGNED;
        case x86_64_mode_x87:
        case x86_64_mode_sse:
                ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
        default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */

ST_FUNC int classify_x86_64_va_arg(CType *ty)
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
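/* Added example (illustrative, not from the original source): with the mapping
   above, va_arg(ap, int) and small integer structs resolve to __va_gen_reg,
   va_arg(ap, double) to __va_float_reg, and anything classified as memory or
   x87 (large structs, long double) falls back to __va_stack.  The matching
   dispatch lives in the __builtin_va_arg support of stdarg.h, which is why the
   enum values must stay in sync. */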
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align, int *regsize)
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);

static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
    return idx >= 0 && idx < REGN ? arg_regs[idx] : 0;
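/* Added note (illustrative, not from the original source): this is the SysV
   AMD64 path.  Integer/pointer arguments use RDI, RSI, RDX, RCX, R8, R9 in that
   order and floating point arguments use XMM0-XMM7.  RCX and RDX (indexes 2 and
   3 above) are staged in R10 and R11 first because gv() may still clobber them
   while later arguments are being evaluated; they are copied back just before
   the call. */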
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
    int size, align, r, args_size, stack_adjust, i, reg_count, k;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    char *onstack = tcc_malloc((nb_args + 1) * sizeof (char));

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gbound_args(nb_args);

    /* calculate the number of integer/float register arguments, remember
       arguments to be passed via stack (in onstack[]), and also remember
       if we have to align the stack pointer to 16 (onstack[i] == 2).  Needs
       to be done in a left-to-right pass over arguments. */
    for(i = nb_args - 1; i >= 0; i--) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (size == 0) continue;
        if (mode == x86_64_mode_sse && nb_sse_args + reg_count <= 8) {
            nb_sse_args += reg_count;
        } else if (mode == x86_64_mode_integer && nb_reg_args + reg_count <= REGN) {
            nb_reg_args += reg_count;
        } else if (mode == x86_64_mode_none) {
            if (align == 16 && (stack_adjust &= 15)) {
            stack_adjust += size;

    if (nb_sse_args && tcc_state->nosse)
        tcc_error("SSE disabled but floating point arguments passed");

    /* fetch cpu flag before generating any code */
    if ((vtop->r & VT_VALMASK) == VT_CMP)

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    for (i = k = 0; i < nb_args;) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (!onstack[i + k]) {
        /* Possibly adjust stack to align SSE boundary.  We're processing
           args from right to left while allocating happens left to right
           (stack grows down), so the adjustment needs to happen _after_
           an argument that requires it. */
            o(0x50); /* push %rax; aka sub $8,%rsp */
        if (onstack[i + k] == 2)
        switch (vtop->type.t & VT_BTYPE) {
            /* allocate the necessary size on stack */
                oad(0xec81, size); /* sub $xxx, %rsp */
            /* generate structure store */
            r = get_reg(RC_INT);
            orex(1, r, 0, 0x89); /* mov %rsp, r */
            o(0xe0 + REG_VALUE(r));
            vset(&vtop->type, r | VT_LVAL, 0);
                /* keep stack aligned for (__bound_)memmove call */
                o(0x10ec8348); /* sub $16,%rsp */
                o(0xf0e48348); /* and $-16,%rsp */
                orex(0,r,0,0x50 + REG_VALUE(r)); /* push r (last %rsp) */
                o(0x08ec8348); /* sub $8,%rsp */
                o(0x08c48348); /* add $8,%rsp */
                o(0x5c); /* pop %rsp */
            oad(0xec8148, size); /* sub $xxx, %rsp */
            o(0x7cdb); /* fstpt 0(%rsp) */
            assert(mode == x86_64_mode_sse);
            o(0x50); /* push $rax */
            /* movq %xmmN, (%rsp) */
            o(0x04 + REG_VALUE(r)*8);
            assert(mode == x86_64_mode_integer);
            /* XXX: implicit cast ? */
            orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */

    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        if (size == 0) continue;
        /* Alter stack entry type so that gv() knows how to treat it */
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm1, %xmmN */
                    o(0xc1 + ((sse_reg+1) << 3));
                    /* movaps %xmm0, %xmmN */
                    o(0xc0 + (sse_reg << 3));
                assert(reg_count == 1);
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
        } else if (mode == x86_64_mode_integer) {
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));

    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */

    if (vtop->type.ref->f.func_type != FUNC_NEW) /* implies FUNC_OLD or FUNC_ELLIPSIS */
        oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
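/* Added note (illustrative, not from the original source): the "mov
   nb_sse_args, %eax" just above implements the SysV requirement that, when
   calling a variadic or unprototyped function, %al must hold an upper bound on
   the number of vector registers actually used for arguments; the callee's
   prologue can use it to decide whether the XMM register save area needs to be
   spilled at all. */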
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);

/* generate function prolog of type 't' */
void gfunc_prolog(Sym *func_sym)
    CType *func_type = &func_sym->type;
    X86_64_Mode mode, ret_mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    ret_mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);

        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = ret_mode == x86_64_mode_memory;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
            case x86_64_mode_integer:
                if (seen_reg_num + reg_count > REGN)
                seen_reg_num += reg_count;
            case x86_64_mode_sse:
                if (seen_sse_num + reg_count > 8)
                seen_sse_num += reg_count;

        /* movl $0x????????, -0x18(%rbp) */
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0x14(%rbp) */
        gen_le32(seen_sse_num * 16 + 48);
        /* leaq $0x????????, %r11 */
        gen_le32(seen_stack_size);
        /* movq %r11, -0x10(%rbp) */
        /* leaq $-192(%rbp), %r11 */
        gen_le32(-176 - 24);
        /* movq %r11, -0x8(%rbp) */
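        /* Added note (illustrative, not from the original source): the four
           stores above pre-fill what __builtin_va_start() will hand out as the
           SysV va_list: gp_offset at -0x18(%rbp) (GP argument registers already
           consumed, times 8), fp_offset at -0x14(%rbp) (48 plus 16 bytes per
           consumed XMM register), overflow_arg_area at -0x10(%rbp) derived from
           the seen stack size, and reg_save_area at -0x8(%rbp) pointing at the
           register spill block set up just below. */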
        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            if (!tcc_state->nosse) {
                o(0xd60f66); /* movq */
                gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);

    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    if (ret_mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        case x86_64_mode_sse:
            if (tcc_state->nosse)
                tcc_error("SSE disabled but floating point arguments used");
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;
        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;
        default: break; /* nothing to be done for x86_64_mode_none */
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);

#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_prolog();

/* generate function epilog */
void gfunc_epilog(void)
#ifdef CONFIG_TCC_BCHECK
    if (tcc_state->do_bounds_check)
        gen_bounds_epilog();

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */

ST_FUNC void gen_fill_nops(int bytes)

/* generate a jump to a label */
    return gjmp2(0xe9, t);

/* generate a jump to a fixed address */
void gjmp_addr(int a)
        oad(0xe9, a - ind - 5);

ST_FUNC int gjmp_append(int n, int t)
    /* insert vtop->c jump list in t */
        uint32_t n1 = n, n2;
        while ((n2 = read32le(p = cur_text_section->data + n1)))

ST_FUNC int gjmp_cond(int op, int t)
        /* This was a float compare.  If the parity flag is set
           the result was unordered.  For anything except != this
           means false and we don't jump (anding both conditions).
           For != this means true (oring both).
           Take care about inverting the test.  We need to jump
           to our target if the result was unordered and test wasn't NE,
           otherwise if unordered we don't want to jump.  */
        int v = vtop->cmp_r;
        if (op ^ v ^ (v != TOK_NE))
            o(0x067a); /* jp +6 */
            t = gjmp2(0x8a, t); /* jp t */
    t = gjmp2(op - 16, t);
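/* Added note (illustrative, not from the original source): 'op' arrives here
   as a setcc-style condition byte (0x90..0x9f range), and the long-form
   conditional jumps use the same condition encodings at 0x80..0x8f behind an
   0x0f prefix; that is why "op - 16" turns e.g. sete (0x94) into je (0x84). */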
/* generate an integer binary operation */
void gen_opi(int op)
    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    case TOK_ADDC1: /* add with carry generation */
        if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            gv2(RC_INT, RC_INT);
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        if (op >= TOK_ULT && op <= TOK_GT)
    case TOK_SUBC1: /* sub with carry generation */
    case TOK_ADDC2: /* add with carry use */
    case TOK_SUBC2: /* sub with carry use */
        gv2(RC_INT, RC_INT);
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        opc = 0xc0 | (opc << 3);
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
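/* Added note (illustrative, not from the original source): the ALU patterns
   above are the standard x86 "group 1" encodings: 0x83 /opc takes a sign-
   extended 8-bit immediate and 0x81 /opc a full 32-bit one, with 'opc'
   selecting add/or/adc/sbb/and/sub/xor/cmp via the reg field of the ModRM
   byte; the register-register form uses opcode (opc << 3) | 0x01.  Likewise
   0xc1 and 0xd3 with 'opc' in the reg field select shifts by an immediate or
   by %cl, and 0xf7 /6 and /7 are div and idiv, preceded by zeroing %edx or by
   cqto for the unsigned and signed cases respectively. */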
void gen_opl(int op)

void vpush_const(int t, int v)
    CType ctype = { t | VT_CONSTANT, 0 };
    vpushsym(&ctype, external_global_sym(v, &ctype));

/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
    int a, ft, fc, swapped, r;
    int bt = vtop->type.t & VT_BTYPE;
    int float_type = bt == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    if (op == TOK_NEG) { /* unary minus */
        if (float_type == RC_ST0) {
            o(0xe0d9); /* fchs */
            /* -0.0, in libtcc1.c */
            vpush_const(bt, bt == VT_FLOAT ? TOK___mzerosf : TOK___mzerodf);
            if (bt == VT_DOUBLE)
            /* xorp[sd] %xmm1, %xmm0 */
            o(0xc0570f | (REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8) << 16);

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {

    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
            else if (op == TOK_EQ || op == TOK_NE)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                o(0x45c4f6); /* test $0x45, %ah */
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            o(0xde); /* fxxxp %st, %st(1) */
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
                if ((r & VT_VALMASK) == VT_LLOCAL) {
                    r = get_reg(RC_INT);
                    v1.r = VT_LOCAL | VT_LVAL;
                    vtop->r = r = r | VT_LVAL;
            if (op == TOK_EQ || op == TOK_NE) {
                if (op == TOK_LE || op == TOK_LT)
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                    op = 0x97; /* seta */
            assert(!(vtop[-1].r & VT_LVAL));
                if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
                o(0x2f0f); /* comisd */
            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            vset_VT_CMP(op | 0x100);
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            assert((ft & VT_BTYPE) != VT_LDOUBLE);
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;
                vtop->r = r = r | VT_LVAL;
            assert(!(vtop[-1].r & VT_LVAL));
            assert(vtop->r & VT_LVAL);
            if ((ft & VT_BTYPE) == VT_DOUBLE) {
            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);

/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        int r = get_reg(RC_FLOAT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
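/* Added note (illustrative, not from the original source): the SSE path above
   emits cvtsi2ss/cvtsi2sd; the leading byte is 0xf3 for a float destination and
   0xf2 for a double one (hence the "+ 1" when the target is VT_FLOAT), and a
   REX.W prefix is added when the source is 64 bits wide, or is 'unsigned int'
   widened to 64 bits, so that the full value rather than just the low 32 bits
   gets converted. */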
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);

/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
    int ft, bt, size, r;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
    } else if (bt == VT_DOUBLE) {
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);

// Generate sign extension from 32 to 64 bits:
ST_FUNC void gen_cvt_sxtw(void)
    /* x86_64 specific: movslq */
    o(0xc0 + (REG_VALUE(r) << 3) + REG_VALUE(r));

/* char/short to int conversion */
ST_FUNC void gen_cvt_csti(int t)
    sz = !(t & VT_UNSIGNED);
    xl = (t & VT_BTYPE) == VT_SHORT;
    ll = (vtop->type.t & VT_BTYPE) == VT_LLONG;
    orex(ll, r, 0, 0xc0b60f /* mov[sz] %a[xl], %eax */
        | (sz << 3 | xl) << 8
        | (REG_VALUE(r) << 3 | REG_VALUE(r)) << 16

/* increment tcov counter */
ST_FUNC void gen_increment_tcov (SValue *sv)
    o(0x058348); /* addq $1, xxx(%rip) */
    greloca(cur_text_section, sv->sym, ind, R_X86_64_PC32, -5);

/* computed goto support */

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);

#ifdef TCC_TARGET_PE
/* Save result of gen_vla_alloc onto the stack */
ST_FUNC void gen_vla_result(int addr) {
    /* mov %rax,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RAX, VT_LOCAL, NULL, addr);

/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#if defined(CONFIG_TCC_BCHECK)
    use_call = tcc_state->do_bounds_check;
#ifdef TCC_TARGET_PE  /* alloca does more than just adjust %rsp on Windows */
        vpush_helper_func(TOK_alloca);
        vswap(); /* Move alloca ref past allocation size */
        r = gv(RC_INT); /* allocation size */
        o(0xe0 | REG_VALUE(r));
        /* We align to 16 bytes rather than align */

/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/