2 * x86-64 code generator for TCC
4 * Copyright (c) 2008 Shinichiro Hamaji
6 * Based on i386-gen.c by Fabrice Bellard
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #ifdef TARGET_DEFS_ONLY
25 /* number of available registers */
29 /* a register can belong to several classes. The classes must be
30 sorted from more general to more precise (see gv2() code which does
31 assumptions on it). */
32 #define RC_INT 0x0001 /* generic integer register */
33 #define RC_FLOAT 0x0002 /* generic float register */
39 #define RC_XMM0 0x0020
40 #define RC_ST0 0x0040 /* only for long double */
41 #define RC_IRET RC_RAX /* function return: integer register */
42 #define RC_LRET RC_RDX /* function return: second integer register */
43 #define RC_FRET RC_XMM0 /* function return: float register */
45 /* pretty names for the registers */
64 #define REX_BASE(reg) (((reg) >> 3) & 1)
65 #define REG_VALUE(reg) ((reg) & 7)
67 /* return registers for function */
68 #define REG_IRET TREG_RAX /* single word int return register */
69 #define REG_LRET TREG_RDX /* second word return register (for long long) */
70 #define REG_FRET TREG_XMM0 /* float return register */
72 /* defined if function parameters must be evaluated in reverse order */
73 #define INVERT_FUNC_PARAMS
75 /* pointer size, in bytes */
78 /* long double size and alignment, in bytes */
79 #define LDOUBLE_SIZE 16
80 #define LDOUBLE_ALIGN 8
81 /* maximum alignment (for aligned attribute support) */
84 ST_FUNC
void gen_opl(int op
);
85 ST_FUNC
void gen_le64(int64_t c
);
87 /******************************************************/
90 #define EM_TCC_TARGET EM_X86_64
92 /* relocation type for 32 bit data relocation */
93 #define R_DATA_32 R_X86_64_32
94 #define R_DATA_PTR R_X86_64_64
95 #define R_JMP_SLOT R_X86_64_JUMP_SLOT
96 #define R_COPY R_X86_64_COPY
98 #define ELF_START_ADDR 0x08048000
99 #define ELF_PAGE_SIZE 0x1000
101 /******************************************************/
102 #else /* ! TARGET_DEFS_ONLY */
103 /******************************************************/
107 ST_DATA
const int reg_classes
[NB_REGS
] = {
108 /* eax */ RC_INT
| RC_RAX
,
109 /* ecx */ RC_INT
| RC_RCX
,
110 /* edx */ RC_INT
| RC_RDX
,
111 /* xmm0 */ RC_FLOAT
| RC_XMM0
,
122 static unsigned long func_sub_sp_offset
;
123 static int func_ret_sub
;
125 /* XXX: make it faster ? */
130 if (ind1
> cur_text_section
->data_allocated
)
131 section_realloc(cur_text_section
, ind1
);
132 cur_text_section
->data
[ind
] = c
;
136 void o(unsigned int c
)
158 void gen_le64(int64_t c
)
170 void orex(int ll
, int r
, int r2
, int b
)
172 if ((r
& VT_VALMASK
) >= VT_CONST
)
174 if ((r2
& VT_VALMASK
) >= VT_CONST
)
176 if (ll
|| REX_BASE(r
) || REX_BASE(r2
))
177 o(0x40 | REX_BASE(r
) | (REX_BASE(r2
) << 2) | (ll
<< 3));
181 /* output a symbol and patch all calls to it */
182 void gsym_addr(int t
, int a
)
186 ptr
= (int *)(cur_text_section
->data
+ t
);
187 n
= *ptr
; /* next value */
198 /* psym is used to put an instruction with a data field which is a
199 reference to a symbol. It is in fact the same as oad ! */
202 static int is64_type(int t
)
204 return ((t
& VT_BTYPE
) == VT_PTR
||
205 (t
& VT_BTYPE
) == VT_FUNC
||
206 (t
& VT_BTYPE
) == VT_LLONG
);
209 static int is_sse_float(int t
) {
212 return bt
== VT_DOUBLE
|| bt
== VT_FLOAT
;
216 /* instruction + 4 bytes data. Return the address of the data */
217 ST_FUNC
int oad(int c
, int s
)
223 if (ind1
> cur_text_section
->data_allocated
)
224 section_realloc(cur_text_section
, ind1
);
225 *(int *)(cur_text_section
->data
+ ind
) = s
;
231 ST_FUNC
void gen_addr32(int r
, Sym
*sym
, int c
)
234 greloc(cur_text_section
, sym
, ind
, R_X86_64_32
);
238 /* output constant with relocation if 'r & VT_SYM' is true */
239 ST_FUNC
void gen_addr64(int r
, Sym
*sym
, int64_t c
)
242 greloc(cur_text_section
, sym
, ind
, R_X86_64_64
);
246 /* output constant with relocation if 'r & VT_SYM' is true */
247 ST_FUNC
void gen_addrpc32(int r
, Sym
*sym
, int c
)
250 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
254 /* output got address with relocation */
255 static void gen_gotpcrel(int r
, Sym
*sym
, int c
)
257 #ifndef TCC_TARGET_PE
260 greloc(cur_text_section
, sym
, ind
, R_X86_64_GOTPCREL
);
261 sr
= cur_text_section
->reloc
;
262 rel
= (ElfW(Rela
) *)(sr
->data
+ sr
->data_offset
- sizeof(ElfW(Rela
)));
265 printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym
->v
, NULL
), c
, r
,
266 cur_text_section
->data
[ind
-3],
267 cur_text_section
->data
[ind
-2],
268 cur_text_section
->data
[ind
-1]
270 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
274 /* we use add c, %xxx for displacement */
276 o(0xc0 + REG_VALUE(r
));
281 static void gen_modrm_impl(int op_reg
, int r
, Sym
*sym
, int c
, int is_got
)
283 op_reg
= REG_VALUE(op_reg
) << 3;
284 if ((r
& VT_VALMASK
) == VT_CONST
) {
285 /* constant memory reference */
288 gen_gotpcrel(r
, sym
, c
);
290 gen_addrpc32(r
, sym
, c
);
292 } else if ((r
& VT_VALMASK
) == VT_LOCAL
) {
293 /* currently, we use only ebp as base */
295 /* short reference */
299 oad(0x85 | op_reg
, c
);
301 } else if ((r
& VT_VALMASK
) >= TREG_MEM
) {
303 g(0x80 | op_reg
| REG_VALUE(r
));
306 g(0x00 | op_reg
| REG_VALUE(r
));
309 g(0x00 | op_reg
| REG_VALUE(r
));
313 /* generate a modrm reference. 'op_reg' contains the additional 3
315 static void gen_modrm(int op_reg
, int r
, Sym
*sym
, int c
)
317 gen_modrm_impl(op_reg
, r
, sym
, c
, 0);
320 /* generate a modrm reference. 'op_reg' contains the additional 3
322 static void gen_modrm64(int opcode
, int op_reg
, int r
, Sym
*sym
, int c
)
325 is_got
= (op_reg
& TREG_MEM
) && !(sym
->type
.t
& VT_STATIC
);
326 orex(1, r
, op_reg
, opcode
);
327 gen_modrm_impl(op_reg
, r
, sym
, c
, is_got
);
331 /* load 'r' from value 'sv' */
332 void load(int r
, SValue
*sv
)
334 int v
, t
, ft
, fc
, fr
;
339 sv
= pe_getimport(sv
, &v2
);
346 #ifndef TCC_TARGET_PE
347 /* we use indirect access via got */
348 if ((fr
& VT_VALMASK
) == VT_CONST
&& (fr
& VT_SYM
) &&
349 (fr
& VT_LVAL
) && !(sv
->sym
->type
.t
& VT_STATIC
)) {
350 /* use the result register as a temporary register */
351 int tr
= r
| TREG_MEM
;
353 /* we cannot use float registers as a temporary register */
354 tr
= get_reg(RC_INT
) | TREG_MEM
;
356 gen_modrm64(0x8b, tr
, fr
, sv
->sym
, 0);
358 /* load from the temporary register */
366 if (v
== VT_LLOCAL
) {
368 v1
.r
= VT_LOCAL
| VT_LVAL
;
374 if ((ft
& VT_BTYPE
) == VT_FLOAT
) {
375 b
= 0x6e0f66, r
= 0; /* movd */
376 } else if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
377 b
= 0x7e0ff3, r
= 0; /* movq */
378 } else if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
379 b
= 0xdb, r
= 5; /* fldt */
380 } else if ((ft
& VT_TYPE
) == VT_BYTE
) {
381 b
= 0xbe0f; /* movsbl */
382 } else if ((ft
& VT_TYPE
) == (VT_BYTE
| VT_UNSIGNED
)) {
383 b
= 0xb60f; /* movzbl */
384 } else if ((ft
& VT_TYPE
) == VT_SHORT
) {
385 b
= 0xbf0f; /* movswl */
386 } else if ((ft
& VT_TYPE
) == (VT_SHORT
| VT_UNSIGNED
)) {
387 b
= 0xb70f; /* movzwl */
393 gen_modrm64(b
, r
, fr
, sv
->sym
, fc
);
396 gen_modrm(r
, fr
, sv
->sym
, fc
);
403 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
404 gen_addrpc32(fr
, sv
->sym
, fc
);
406 if (sv
->sym
->type
.t
& VT_STATIC
) {
408 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
409 gen_addrpc32(fr
, sv
->sym
, fc
);
412 o(0x05 + REG_VALUE(r
) * 8); /* mov xx(%rip), r */
413 gen_gotpcrel(fr
, sv
->sym
, fc
);
416 } else if (is64_type(ft
)) {
417 orex(1,r
,0, 0xb8 + REG_VALUE(r
)); /* mov $xx, r */
420 orex(0,r
,0, 0xb8 + REG_VALUE(r
)); /* mov $xx, r */
423 } else if (v
== VT_LOCAL
) {
424 orex(1,0,r
,0x8d); /* lea xxx(%ebp), r */
425 gen_modrm(r
, VT_LOCAL
, sv
->sym
, fc
);
426 } else if (v
== VT_CMP
) {
428 oad(0xb8 + REG_VALUE(r
), 0); /* mov $0, r */
429 orex(0,r
,0, 0x0f); /* setxx %br */
431 o(0xc0 + REG_VALUE(r
));
432 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
435 oad(0xb8 + REG_VALUE(r
), t
); /* mov $1, r */
436 o(0x05eb + (REX_BASE(r
) << 8)); /* jmp after */
439 oad(0xb8 + REG_VALUE(r
), t
^ 1); /* mov $0, r */
441 if (r
== TREG_XMM0
) {
442 assert(v
== TREG_ST0
);
443 /* gen_cvt_ftof(VT_DOUBLE); */
444 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
445 /* movsd -0x10(%rsp),%xmm0 */
448 } else if (r
== TREG_ST0
) {
449 assert(v
== TREG_XMM0
);
450 /* gen_cvt_ftof(VT_LDOUBLE); */
451 /* movsd %xmm0,-0x10(%rsp) */
454 o(0xf02444dd); /* fldl -0x10(%rsp) */
457 o(0xc0 + REG_VALUE(r
) + REG_VALUE(v
) * 8); /* mov v, r */
463 /* store register 'r' in lvalue 'v' */
464 void store(int r
, SValue
*v
)
468 /* store the REX prefix in this variable when PIC is enabled */
473 v
= pe_getimport(v
, &v2
);
478 fr
= v
->r
& VT_VALMASK
;
481 #ifndef TCC_TARGET_PE
482 /* we need to access the variable via got */
483 if (fr
== VT_CONST
&& (v
->r
& VT_SYM
)) {
484 /* mov xx(%rip), %r11 */
486 gen_gotpcrel(TREG_R11
, v
->sym
, v
->c
.ul
);
487 pic
= is64_type(bt
) ? 0x49 : 0x41;
491 /* XXX: incorrect if float reg to reg */
492 if (bt
== VT_FLOAT
) {
495 o(0x7e0f); /* movd */
497 } else if (bt
== VT_DOUBLE
) {
500 o(0xd60f); /* movq */
502 } else if (bt
== VT_LDOUBLE
) {
503 o(0xc0d9); /* fld %st(0) */
511 if (bt
== VT_BYTE
|| bt
== VT_BOOL
)
513 else if (is64_type(bt
))
519 /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
524 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
525 gen_modrm64(op64
, r
, v
->r
, v
->sym
, fc
);
526 } else if (fr
!= r
) {
527 /* XXX: don't we really come here? */
529 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
532 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
533 gen_modrm(r
, v
->r
, v
->sym
, fc
);
534 } else if (fr
!= r
) {
535 /* XXX: don't we really come here? */
537 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
542 /* 'is_jmp' is '1' if it is a jump */
543 static void gcall_or_jmp(int is_jmp
)
546 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
548 if (vtop
->r
& VT_SYM
) {
549 /* relocation case */
550 greloc(cur_text_section
, vtop
->sym
,
551 ind
+ 1, R_X86_64_PC32
);
553 /* put an empty PC32 relocation */
554 put_elf_reloc(symtab_section
, cur_text_section
,
555 ind
+ 1, R_X86_64_PC32
, 0);
557 oad(0xe8 + is_jmp
, vtop
->c
.ul
- 4); /* call/jmp im */
559 /* otherwise, indirect call */
563 o(0xff); /* call/jmp *r */
564 o(0xd0 + REG_VALUE(r
) + (is_jmp
<< 4));
571 static const uint8_t arg_regs
[] = {
572 TREG_RCX
, TREG_RDX
, TREG_R8
, TREG_R9
575 static int func_scratch
;
577 /* Generate function call. The function address is pushed first, then
578 all the parameters in call order. This function pops all the
579 parameters and the function address. */
581 void gen_offs_sp(int b
, int r
, int d
)
583 orex(1,0,r
& 0x100 ? 0 : r
, b
);
585 o(0x2444 | (REG_VALUE(r
) << 3));
588 o(0x2484 | (REG_VALUE(r
) << 3));
593 void gfunc_call(int nb_args
)
595 int size
, align
, r
, args_size
, i
, d
, j
, bt
;
596 int nb_reg_args
, gen_reg
;
598 /* calculate the number of integer/float arguments */
600 for(i
= 0; i
< nb_args
; i
++) {
601 bt
= (vtop
[-i
].type
.t
& VT_BTYPE
);
602 if (bt
!= VT_STRUCT
&& bt
!= VT_LDOUBLE
)
606 args_size
= (nb_reg_args
< REGN
? REGN
: nb_reg_args
) * PTR_SIZE
;
608 /* for struct arguments, we need to call memcpy and the function
609 call breaks register passing arguments we are preparing.
610 So, we process arguments which will be passed by stack first. */
611 for(i
= 0; i
< nb_args
; i
++) {
612 SValue
*sv
= &vtop
[-i
];
613 bt
= (sv
->type
.t
& VT_BTYPE
);
614 if (bt
== VT_STRUCT
) {
615 size
= type_size(&sv
->type
, &align
);
616 /* align to stack align size */
617 size
= (size
+ 15) & ~15;
618 /* generate structure store */
620 gen_offs_sp(0x8d, r
, args_size
);
623 /* generate memcpy call */
624 vset(&sv
->type
, r
| VT_LVAL
, 0);
629 } else if (bt
== VT_LDOUBLE
) {
632 gen_offs_sp(0xdb, 0x107, args_size
);
638 if (func_scratch
< args_size
)
639 func_scratch
= args_size
;
641 for (i
= 0; i
< REGN
; ++i
)
642 save_reg(arg_regs
[i
]);
644 gen_reg
= nb_reg_args
;
645 for(i
= 0; i
< nb_args
; i
++) {
646 bt
= (vtop
->type
.t
& VT_BTYPE
);
647 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
649 } else if (is_sse_float(vtop
->type
.t
)) {
650 gv(RC_FLOAT
); /* only one float register */
653 /* movq %xmm0, j*8(%rsp) */
654 gen_offs_sp(0xd60f66, 0x100, j
*8);
656 /* movaps %xmm0, %xmmN */
660 /* mov %xmm0, %rxx */
663 o(0xc0 + REG_VALUE(d
));
669 gen_offs_sp(0x89, r
, j
*8);
673 gv(reg_classes
[d
] & ~RC_INT
);
678 o(0xc0 + REG_VALUE(d
) + REG_VALUE(r
) * 8);
692 #define FUNC_PROLOG_SIZE 11
694 /* generate function prolog of type 't' */
695 void gfunc_prolog(CType
*func_type
)
697 int addr
, align
, size
, reg_param_index
, bt
;
706 ind
+= FUNC_PROLOG_SIZE
;
707 func_sub_sp_offset
= ind
;
710 sym
= func_type
->ref
;
712 /* if the function returns a structure, then add an
713 implicit pointer parameter */
715 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
716 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
721 /* define parameters */
722 while ((sym
= sym
->next
) != NULL
) {
724 bt
= type
->t
& VT_BTYPE
;
725 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
)
727 if (reg_param_index
< REGN
) {
728 /* save arguments passed by register */
729 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
731 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
, addr
);
736 while (reg_param_index
< REGN
) {
737 if (func_type
->ref
->c
== FUNC_ELLIPSIS
)
738 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
743 sym
= func_type
->ref
;
744 while ((sym
= sym
->next
) != NULL
) {
746 bt
= type
->t
& VT_BTYPE
;
747 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
748 size
= type_size(type
, &align
);
749 size
= (size
+ 15) & -16;
750 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
, addr
);
756 /* generate function epilog */
757 void gfunc_epilog(void)
762 if (func_ret_sub
== 0) {
767 g(func_ret_sub
>> 8);
771 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
772 /* align local size to word & save local variables */
773 v
= (func_scratch
+ -loc
+ 15) & -16;
776 Sym
*sym
= external_global_sym(TOK___chkstk
, &func_old_type
, 0);
777 oad(0xb8, v
); /* mov stacksize, %eax */
778 oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
779 greloc(cur_text_section
, sym
, ind
-4, R_X86_64_PC32
);
780 o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
782 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
783 o(0xec8148); /* sub rsp, stacksize */
787 cur_text_section
->data_offset
= saved_ind
;
788 pe_add_unwind_data(ind
, saved_ind
, v
);
789 ind
= cur_text_section
->data_offset
;
794 static void gadd_sp(int val
)
796 if (val
== (char)val
) {
800 oad(0xc48148, val
); /* add $xxx, %rsp */
805 static const uint8_t arg_regs
[REGN
] = {
806 TREG_RDI
, TREG_RSI
, TREG_RDX
, TREG_RCX
, TREG_R8
, TREG_R9
809 /* Generate function call. The function address is pushed first, then
810 all the parameters in call order. This function pops all the
811 parameters and the function address. */
812 void gfunc_call(int nb_args
)
814 int size
, align
, r
, args_size
, i
;
818 int sse_reg
, gen_reg
;
820 /* calculate the number of integer/float arguments */
822 for(i
= 0; i
< nb_args
; i
++) {
823 if ((vtop
[-i
].type
.t
& VT_BTYPE
) == VT_STRUCT
) {
824 args_size
+= type_size(&vtop
[-i
].type
, &align
);
825 args_size
= (args_size
+ 7) & ~7;
826 } else if ((vtop
[-i
].type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
828 } else if (is_sse_float(vtop
[-i
].type
.t
)) {
830 if (nb_sse_args
> 8) args_size
+= 8;
833 if (nb_reg_args
> REGN
) args_size
+= 8;
837 /* for struct arguments, we need to call memcpy and the function
838 call breaks register passing arguments we are preparing.
839 So, we process arguments which will be passed by stack first. */
841 gen_reg
= nb_reg_args
;
842 sse_reg
= nb_sse_args
;
844 /* adjust stack to align SSE boundary */
845 if (args_size
&= 15) {
846 /* fetch cpu flag before the following sub will change the value */
847 if (vtop
>= vstack
&& (vtop
->r
& VT_VALMASK
) == VT_CMP
)
850 args_size
= 16 - args_size
;
852 oad(0xec81, args_size
); /* sub $xxx, %rsp */
855 for(i
= 0; i
< nb_args
; i
++) {
856 if ((vtop
->type
.t
& VT_BTYPE
) == VT_STRUCT
) {
857 size
= type_size(&vtop
->type
, &align
);
858 /* align to stack align size */
859 size
= (size
+ 7) & ~7;
860 /* allocate the necessary size on stack */
862 oad(0xec81, size
); /* sub $xxx, %rsp */
863 /* generate structure store */
865 orex(1, r
, 0, 0x89); /* mov %rsp, r */
866 o(0xe0 + REG_VALUE(r
));
868 /* following code breaks vtop[1], vtop[2], and vtop[3] */
869 SValue tmp1
= vtop
[1];
870 SValue tmp2
= vtop
[2];
871 SValue tmp3
= vtop
[3];
872 vset(&vtop
->type
, r
| VT_LVAL
, 0);
880 } else if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
883 oad(0xec8148, size
); /* sub $xxx, %rsp */
884 o(0x7cdb); /* fstpt 0(%rsp) */
888 } else if (is_sse_float(vtop
->type
.t
)) {
892 o(0x50); /* push $rax */
893 /* movq %xmm0, (%rsp) */
901 /* XXX: implicit cast ? */
904 orex(0,r
,0,0x50 + REG_VALUE(r
)); /* push r */
912 save_regs(0); /* save used temporary registers */
914 /* then, we prepare register passing arguments.
915 Note that we cannot set RDX and RCX in this loop because gv()
916 may break these temporary registers. Let's use R10 and R11
918 gen_reg
= nb_reg_args
;
919 sse_reg
= nb_sse_args
;
920 for(i
= 0; i
< nb_args
; i
++) {
921 if ((vtop
->type
.t
& VT_BTYPE
) == VT_STRUCT
||
922 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
923 } else if (is_sse_float(vtop
->type
.t
)) {
926 gv(RC_FLOAT
); /* only one float register */
927 /* movaps %xmm0, %xmmN */
929 o(0xc0 + (sse_reg
<< 3));
934 /* XXX: implicit cast ? */
938 if (j
== 2 || j
== 3)
939 /* j=2: r10, j=3: r11 */
941 orex(1,d
,r
,0x89); /* mov */
942 o(0xc0 + REG_VALUE(r
) * 8 + REG_VALUE(d
));
948 /* Copy R10 and R11 into RDX and RCX, respectively */
949 if (nb_reg_args
> 2) {
950 o(0xd2894c); /* mov %r10, %rdx */
951 if (nb_reg_args
> 3) {
952 o(0xd9894c); /* mov %r11, %rcx */
956 oad(0xb8, nb_sse_args
< 8 ? nb_sse_args
: 8); /* mov nb_sse_args, %eax */
964 #define FUNC_PROLOG_SIZE 11
966 static void push_arg_reg(int i
) {
968 gen_modrm64(0x89, arg_regs
[i
], VT_LOCAL
, NULL
, loc
);
971 /* generate function prolog of type 't' */
972 void gfunc_prolog(CType
*func_type
)
974 int i
, addr
, align
, size
;
975 int param_index
, param_addr
, reg_param_index
, sse_param_index
;
979 sym
= func_type
->ref
;
982 ind
+= FUNC_PROLOG_SIZE
;
983 func_sub_sp_offset
= ind
;
986 if (func_type
->ref
->c
== FUNC_ELLIPSIS
) {
987 int seen_reg_num
, seen_sse_num
, seen_stack_size
;
988 seen_reg_num
= seen_sse_num
= 0;
989 /* frame pointer and return address */
990 seen_stack_size
= PTR_SIZE
* 2;
991 /* count the number of seen parameters */
992 sym
= func_type
->ref
;
993 while ((sym
= sym
->next
) != NULL
) {
995 if (is_sse_float(type
->t
)) {
996 if (seen_sse_num
< 8) {
999 seen_stack_size
+= 8;
1001 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
) {
1002 size
= type_size(type
, &align
);
1003 size
= (size
+ 7) & ~7;
1004 seen_stack_size
+= size
;
1005 } else if ((type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1006 seen_stack_size
+= LDOUBLE_SIZE
;
1008 if (seen_reg_num
< REGN
) {
1011 seen_stack_size
+= 8;
1017 /* movl $0x????????, -0x10(%rbp) */
1019 gen_le32(seen_reg_num
* 8);
1020 /* movl $0x????????, -0xc(%rbp) */
1022 gen_le32(seen_sse_num
* 16 + 48);
1023 /* movl $0x????????, -0x8(%rbp) */
1025 gen_le32(seen_stack_size
);
1027 /* save all register passing arguments */
1028 for (i
= 0; i
< 8; i
++) {
1030 o(0xd60f66); /* movq */
1031 gen_modrm(7 - i
, VT_LOCAL
, NULL
, loc
);
1032 /* movq $0, loc+8(%rbp) */
1037 for (i
= 0; i
< REGN
; i
++) {
1038 push_arg_reg(REGN
-1-i
);
1042 sym
= func_type
->ref
;
1044 reg_param_index
= 0;
1045 sse_param_index
= 0;
1047 /* if the function returns a structure, then add an
1048 implicit pointer parameter */
1049 func_vt
= sym
->type
;
1050 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
1051 push_arg_reg(reg_param_index
);
1058 /* define parameters */
1059 while ((sym
= sym
->next
) != NULL
) {
1061 size
= type_size(type
, &align
);
1062 size
= (size
+ 7) & ~7;
1063 if (is_sse_float(type
->t
)) {
1064 if (sse_param_index
< 8) {
1065 /* save arguments passed by register */
1067 o(0xd60f66); /* movq */
1068 gen_modrm(sse_param_index
, VT_LOCAL
, NULL
, loc
);
1076 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
||
1077 (type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1081 if (reg_param_index
< REGN
) {
1082 /* save arguments passed by register */
1083 push_arg_reg(reg_param_index
);
1091 sym_push(sym
->v
& ~SYM_FIELD
, type
,
1092 VT_LOCAL
| VT_LVAL
, param_addr
);
1097 /* generate function epilog */
1098 void gfunc_epilog(void)
1102 o(0xc9); /* leave */
1103 if (func_ret_sub
== 0) {
1106 o(0xc2); /* ret n */
1108 g(func_ret_sub
>> 8);
1110 /* align local size to word & save local variables */
1111 v
= (-loc
+ 15) & -16;
1113 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
1114 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1115 o(0xec8148); /* sub rsp, stacksize */
1122 /* generate a jump to a label */
1125 return psym(0xe9, t
);
1128 /* generate a jump to a fixed address */
1129 void gjmp_addr(int a
)
1137 oad(0xe9, a
- ind
- 5);
1141 /* generate a test. set 'inv' to invert test. Stack entry is popped */
1142 int gtst(int inv
, int t
)
1146 v
= vtop
->r
& VT_VALMASK
;
1148 /* fast case : can jump directly since flags are set */
1150 t
= psym((vtop
->c
.i
- 16) ^ inv
, t
);
1151 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
1152 /* && or || optimization */
1153 if ((v
& 1) == inv
) {
1154 /* insert vtop->c jump list in t */
1157 p
= (int *)(cur_text_section
->data
+ *p
);
1165 if (is_float(vtop
->type
.t
) ||
1166 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1170 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1171 /* constant jmp optimization */
1172 if ((vtop
->c
.i
!= 0) != inv
)
1177 o(0xc0 + REG_VALUE(v
) * 9);
1179 t
= psym(0x85 ^ inv
, t
);
1186 /* generate an integer binary operation */
1187 void gen_opi(int op
)
1192 ll
= is64_type(vtop
[-1].type
.t
);
1193 uu
= (vtop
[-1].type
.t
& VT_UNSIGNED
) != 0;
1194 cc
= (vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
;
1198 case TOK_ADDC1
: /* add with carry generation */
1201 if (cc
&& (!ll
|| (int)vtop
->c
.ll
== vtop
->c
.ll
)) {
1208 /* XXX: generate inc and dec for smaller code ? */
1209 orex(ll
, r
, 0, 0x83);
1210 o(0xc0 | (opc
<< 3) | REG_VALUE(r
));
1213 orex(ll
, r
, 0, 0x81);
1214 oad(0xc0 | (opc
<< 3) | REG_VALUE(r
), c
);
1217 gv2(RC_INT
, RC_INT
);
1220 orex(ll
, r
, fr
, (opc
<< 3) | 0x01);
1221 o(0xc0 + REG_VALUE(r
) + REG_VALUE(fr
) * 8);
1224 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1230 case TOK_SUBC1
: /* sub with carry generation */
1233 case TOK_ADDC2
: /* add with carry use */
1236 case TOK_SUBC2
: /* sub with carry use */
1249 gv2(RC_INT
, RC_INT
);
1252 orex(ll
, fr
, r
, 0xaf0f); /* imul fr, r */
1253 o(0xc0 + REG_VALUE(fr
) + REG_VALUE(r
) * 8);
1265 opc
= 0xc0 | (opc
<< 3);
1271 orex(ll
, r
, 0, 0xc1); /* shl/shr/sar $xxx, r */
1272 o(opc
| REG_VALUE(r
));
1273 g(vtop
->c
.i
& (ll
? 63 : 31));
1275 /* we generate the shift in ecx */
1276 gv2(RC_INT
, RC_RCX
);
1278 orex(ll
, r
, 0, 0xd3); /* shl/shr/sar %cl, r */
1279 o(opc
| REG_VALUE(r
));
1292 /* first operand must be in eax */
1293 /* XXX: need better constraint for second operand */
1294 gv2(RC_RAX
, RC_RCX
);
1299 orex(ll
, 0, 0, uu
? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
1300 orex(ll
, fr
, 0, 0xf7); /* div fr, %eax */
1301 o((uu
? 0xf0 : 0xf8) + REG_VALUE(fr
));
1302 if (op
== '%' || op
== TOK_UMOD
)
1314 void gen_opl(int op
)
1319 /* generate a floating point operation 'v = t1 op t2' instruction. The
1320 two operands are guaranteed to have the same floating point type */
1321 /* XXX: need to use ST1 too */
1322 void gen_opf(int op
)
1324 int a
, ft
, fc
, swapped
, r
;
1326 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
? RC_ST0
: RC_FLOAT
;
1328 /* convert constants to memory references */
1329 if ((vtop
[-1].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
1334 if ((vtop
[0].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
)
1337 /* must put at least one value in the floating point register */
1338 if ((vtop
[-1].r
& VT_LVAL
) &&
1339 (vtop
[0].r
& VT_LVAL
)) {
1345 /* swap the stack if needed so that t1 is the register and t2 is
1346 the memory reference */
1347 if (vtop
[-1].r
& VT_LVAL
) {
1351 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1352 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1353 /* load on stack second operand */
1354 load(TREG_ST0
, vtop
);
1355 save_reg(TREG_RAX
); /* eax is used by FP comparison code */
1356 if (op
== TOK_GE
|| op
== TOK_GT
)
1358 else if (op
== TOK_EQ
|| op
== TOK_NE
)
1361 o(0xc9d9); /* fxch %st(1) */
1362 o(0xe9da); /* fucompp */
1363 o(0xe0df); /* fnstsw %ax */
1365 o(0x45e480); /* and $0x45, %ah */
1366 o(0x40fC80); /* cmp $0x40, %ah */
1367 } else if (op
== TOK_NE
) {
1368 o(0x45e480); /* and $0x45, %ah */
1369 o(0x40f480); /* xor $0x40, %ah */
1371 } else if (op
== TOK_GE
|| op
== TOK_LE
) {
1372 o(0x05c4f6); /* test $0x05, %ah */
1375 o(0x45c4f6); /* test $0x45, %ah */
1382 /* no memory reference possible for long double operations */
1383 load(TREG_ST0
, vtop
);
1407 o(0xde); /* fxxxp %st, %st(1) */
1412 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1413 /* if saved lvalue, then we must reload it */
1416 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1418 r
= get_reg(RC_INT
);
1420 v1
.r
= VT_LOCAL
| VT_LVAL
;
1426 if (op
== TOK_EQ
|| op
== TOK_NE
) {
1429 if (op
== TOK_LE
|| op
== TOK_LT
)
1431 if (op
== TOK_LE
|| op
== TOK_GE
) {
1432 op
= 0x93; /* setae */
1434 op
= 0x97; /* seta */
1439 o(0x7e0ff3); /* movq */
1440 gen_modrm(1, r
, vtop
->sym
, fc
);
1442 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1445 o(0x2e0f); /* ucomisd %xmm0, %xmm1 */
1448 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1451 o(0x2e0f); /* ucomisd */
1452 gen_modrm(0, r
, vtop
->sym
, fc
);
1459 /* no memory reference possible for long double operations */
1460 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1461 load(TREG_XMM0
, vtop
);
1481 if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
1482 o(0xde); /* fxxxp %st, %st(1) */
1485 /* if saved lvalue, then we must reload it */
1487 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1489 r
= get_reg(RC_INT
);
1491 v1
.r
= VT_LOCAL
| VT_LVAL
;
1497 /* movq %xmm0,%xmm1 */
1500 load(TREG_XMM0
, vtop
);
1501 /* subsd %xmm1,%xmm0 (f2 0f 5c c1) */
1502 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1511 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1518 gen_modrm(0, r
, vtop
->sym
, fc
);
1526 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
1527 and 'long long' cases. */
1528 void gen_cvt_itof(int t
)
1530 if ((t
& VT_BTYPE
) == VT_LDOUBLE
) {
1533 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1534 /* signed long long to float/double/long double (unsigned case
1535 is handled generically) */
1536 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1537 o(0x242cdf); /* fildll (%rsp) */
1538 o(0x08c48348); /* add $8, %rsp */
1539 } else if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1540 (VT_INT
| VT_UNSIGNED
)) {
1541 /* unsigned int to float/double/long double */
1542 o(0x6a); /* push $0 */
1544 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1545 o(0x242cdf); /* fildll (%rsp) */
1546 o(0x10c48348); /* add $16, %rsp */
1548 /* int to float/double/long double */
1549 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1550 o(0x2404db); /* fildl (%rsp) */
1551 o(0x08c48348); /* add $8, %rsp */
1555 save_reg(TREG_XMM0
);
1557 o(0xf2 + ((t
& VT_BTYPE
) == VT_FLOAT
));
1558 if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1559 (VT_INT
| VT_UNSIGNED
) ||
1560 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1564 o(0xc0 + (vtop
->r
& VT_VALMASK
)); /* cvtsi2sd */
1565 vtop
->r
= TREG_XMM0
;
1569 /* convert from one floating point type to another */
1570 void gen_cvt_ftof(int t
)
1578 if (bt
== VT_FLOAT
) {
1580 if (tbt
== VT_DOUBLE
) {
1581 o(0xc0140f); /* unpcklps */
1582 o(0xc05a0f); /* cvtps2pd */
1583 } else if (tbt
== VT_LDOUBLE
) {
1584 /* movss %xmm0,-0x10(%rsp) */
1587 o(0xf02444d9); /* flds -0x10(%rsp) */
1590 } else if (bt
== VT_DOUBLE
) {
1592 if (tbt
== VT_FLOAT
) {
1593 o(0xc0140f66); /* unpcklpd */
1594 o(0xc05a0f66); /* cvtpd2ps */
1595 } else if (tbt
== VT_LDOUBLE
) {
1596 /* movsd %xmm0,-0x10(%rsp) */
1599 o(0xf02444dd); /* fldl -0x10(%rsp) */
1604 if (tbt
== VT_DOUBLE
) {
1605 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
1606 /* movsd -0x10(%rsp),%xmm0 */
1609 vtop
->r
= TREG_XMM0
;
1610 } else if (tbt
== VT_FLOAT
) {
1611 o(0xf0245cd9); /* fstps -0x10(%rsp) */
1612 /* movss -0x10(%rsp),%xmm0 */
1615 vtop
->r
= TREG_XMM0
;
1620 /* convert fp to int 't' type */
1621 void gen_cvt_ftoi(int t
)
1623 int ft
, bt
, size
, r
;
1626 if (bt
== VT_LDOUBLE
) {
1627 gen_cvt_ftof(VT_DOUBLE
);
1637 r
= get_reg(RC_INT
);
1638 if (bt
== VT_FLOAT
) {
1640 } else if (bt
== VT_DOUBLE
) {
1645 orex(size
== 8, r
, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
1646 o(0xc0 + (REG_VALUE(r
) << 3));
1650 /* computed goto support */
1657 /* end of x86-64 code generator */
1658 /*************************************************************/
1659 #endif /* ! TARGET_DEFS_ONLY */
1660 /******************************************************/