2 * x86-64 code generator for TCC
4 * Copyright (c) 2008 Shinichiro Hamaji
6 * Based on i386-gen.c by Fabrice Bellard
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #ifdef TARGET_DEFS_ONLY
25 /* number of available registers */
29 /* a register can belong to several classes. The classes must be
30 sorted from more general to more precise (see gv2() code which does
31 assumptions on it). */
32 #define RC_INT 0x0001 /* generic integer register */
33 #define RC_FLOAT 0x0002 /* generic float register */
41 #define RC_XMM0 0x0020
42 #define RC_ST0 0x0040 /* only for long double */
43 #define RC_IRET RC_RAX /* function return: integer register */
44 #define RC_LRET RC_RDX /* function return: second integer register */
45 #define RC_FRET RC_XMM0 /* function return: float register */
47 /* pretty names for the registers */
66 #define REX_BASE(reg) (((reg) >> 3) & 1)
67 #define REG_VALUE(reg) ((reg) & 7)
69 /* return registers for function */
70 #define REG_IRET TREG_RAX /* single word int return register */
71 #define REG_LRET TREG_RDX /* second word return register (for long long) */
72 #define REG_FRET TREG_XMM0 /* float return register */
74 /* defined if function parameters must be evaluated in reverse order */
75 #define INVERT_FUNC_PARAMS
77 /* pointer size, in bytes */
80 /* long double size and alignment, in bytes */
81 #define LDOUBLE_SIZE 16
82 #define LDOUBLE_ALIGN 8
83 /* maximum alignment (for aligned attribute support) */
86 /******************************************************/
89 #define EM_TCC_TARGET EM_X86_64
91 /* relocation type for 32 bit data relocation */
92 #define R_DATA_32 R_X86_64_32
93 #define R_DATA_PTR R_X86_64_64
94 #define R_JMP_SLOT R_X86_64_JUMP_SLOT
95 #define R_COPY R_X86_64_COPY
97 #define ELF_START_ADDR 0x08048000
98 #define ELF_PAGE_SIZE 0x1000
100 /******************************************************/
101 #else /* ! TARGET_DEFS_ONLY */
102 /******************************************************/
106 ST_DATA
const int reg_classes
[NB_REGS
+7] = {
107 /* eax */ RC_INT
| RC_RAX
,
108 /* ecx */ RC_INT
| RC_RCX
,
109 /* edx */ RC_INT
| RC_RDX
,
110 /* xmm0 */ RC_FLOAT
| RC_XMM0
,
/* position just past the function prolog; gfunc_epilog rewinds 'ind'
   to func_sub_sp_offset - FUNC_PROLOG_SIZE to patch the stack-frame
   setup once the final local size is known */
static unsigned long func_sub_sp_offset;
/* byte count for 'ret n' when the callee pops its arguments (0 = plain ret) */
static int func_ret_sub;
124 /* XXX: make it faster ? */
129 if (ind1
> cur_text_section
->data_allocated
)
130 section_realloc(cur_text_section
, ind1
);
131 cur_text_section
->data
[ind
] = c
;
/* Emit an opcode: output the bytes of 'c' least-significant first,
   stopping at the first zero byte.  Multi-byte opcodes are written as
   little-endian integer literals throughout this file (e.g. 0xaf0f). */
void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
/* output a little-endian 64 bit constant */
void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
169 void orex(int ll
, int r
, int r2
, int b
)
171 if ((r
& VT_VALMASK
) >= VT_CONST
)
173 if ((r2
& VT_VALMASK
) >= VT_CONST
)
175 if (ll
|| REX_BASE(r
) || REX_BASE(r2
))
176 o(0x40 | REX_BASE(r
) | (REX_BASE(r2
) << 2) | (ll
<< 3));
180 /* output a symbol and patch all calls to it */
181 void gsym_addr(int t
, int a
)
185 ptr
= (int *)(cur_text_section
->data
+ t
);
186 n
= *ptr
; /* next value */
197 /* psym is used to put an instruction with a data field which is a
198 reference to a symbol. It is in fact the same as oad ! */
201 static int is64_type(int t
)
203 return ((t
& VT_BTYPE
) == VT_PTR
||
204 (t
& VT_BTYPE
) == VT_FUNC
||
205 (t
& VT_BTYPE
) == VT_LLONG
);
208 static int is_sse_float(int t
) {
211 return bt
== VT_DOUBLE
|| bt
== VT_FLOAT
;
215 /* instruction + 4 bytes data. Return the address of the data */
216 ST_FUNC
int oad(int c
, int s
)
222 if (ind1
> cur_text_section
->data_allocated
)
223 section_realloc(cur_text_section
, ind1
);
224 *(int *)(cur_text_section
->data
+ ind
) = s
;
230 ST_FUNC
void gen_addr32(int r
, Sym
*sym
, int c
)
233 greloc(cur_text_section
, sym
, ind
, R_X86_64_32
);
237 /* output constant with relocation if 'r & VT_SYM' is true */
238 ST_FUNC
void gen_addr64(int r
, Sym
*sym
, int64_t c
)
241 greloc(cur_text_section
, sym
, ind
, R_X86_64_64
);
245 /* output constant with relocation if 'r & VT_SYM' is true */
246 ST_FUNC
void gen_addrpc32(int r
, Sym
*sym
, int c
)
249 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
253 /* output got address with relocation */
254 static void gen_gotpcrel(int r
, Sym
*sym
, int c
)
256 #ifndef TCC_TARGET_PE
259 greloc(cur_text_section
, sym
, ind
, R_X86_64_GOTPCREL
);
260 sr
= cur_text_section
->reloc
;
261 rel
= (ElfW(Rela
) *)(sr
->data
+ sr
->data_offset
- sizeof(ElfW(Rela
)));
264 printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym
->v
, NULL
), c
, r
,
265 cur_text_section
->data
[ind
-3],
266 cur_text_section
->data
[ind
-2],
267 cur_text_section
->data
[ind
-1]
269 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
273 /* we use add c, %xxx for displacement */
275 o(0xc0 + REG_VALUE(r
));
280 static void gen_modrm_impl(int op_reg
, int r
, Sym
*sym
, int c
, int is_got
)
282 op_reg
= REG_VALUE(op_reg
) << 3;
283 if ((r
& VT_VALMASK
) == VT_CONST
) {
284 /* constant memory reference */
287 gen_gotpcrel(r
, sym
, c
);
289 gen_addrpc32(r
, sym
, c
);
291 } else if ((r
& VT_VALMASK
) == VT_LOCAL
) {
292 /* currently, we use only ebp as base */
294 /* short reference */
298 oad(0x85 | op_reg
, c
);
300 } else if ((r
& VT_VALMASK
) >= TREG_MEM
) {
302 g(0x80 | op_reg
| REG_VALUE(r
));
305 g(0x00 | op_reg
| REG_VALUE(r
));
308 g(0x00 | op_reg
| REG_VALUE(r
));
312 /* generate a modrm reference. 'op_reg' contains the addtionnal 3
314 static void gen_modrm(int op_reg
, int r
, Sym
*sym
, int c
)
316 gen_modrm_impl(op_reg
, r
, sym
, c
, 0);
319 /* generate a modrm reference. 'op_reg' contains the addtionnal 3
321 static void gen_modrm64(int opcode
, int op_reg
, int r
, Sym
*sym
, int c
)
324 is_got
= (op_reg
& TREG_MEM
) && !(sym
->type
.t
& VT_STATIC
);
325 orex(1, r
, op_reg
, opcode
);
326 gen_modrm_impl(op_reg
, r
, sym
, c
, is_got
);
330 /* load 'r' from value 'sv' */
331 void load(int r
, SValue
*sv
)
333 int v
, t
, ft
, fc
, fr
;
338 sv
= pe_getimport(sv
, &v2
);
345 #ifndef TCC_TARGET_PE
346 /* we use indirect access via got */
347 if ((fr
& VT_VALMASK
) == VT_CONST
&& (fr
& VT_SYM
) &&
348 (fr
& VT_LVAL
) && !(sv
->sym
->type
.t
& VT_STATIC
)) {
349 /* use the result register as a temporal register */
350 int tr
= r
| TREG_MEM
;
352 /* we cannot use float registers as a temporal register */
353 tr
= get_reg(RC_INT
) | TREG_MEM
;
355 gen_modrm64(0x8b, tr
, fr
, sv
->sym
, 0);
357 /* load from the temporal register */
365 if (v
== VT_LLOCAL
) {
367 v1
.r
= VT_LOCAL
| VT_LVAL
;
370 if (!(reg_classes
[fr
] & RC_INT
))
371 fr
= get_reg(RC_INT
);
375 if ((ft
& VT_BTYPE
) == VT_FLOAT
) {
376 b
= 0x6e0f66, r
= 0; /* movd */
377 } else if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
378 b
= 0x7e0ff3, r
= 0; /* movq */
379 } else if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
380 b
= 0xdb, r
= 5; /* fldt */
381 } else if ((ft
& VT_TYPE
) == VT_BYTE
) {
382 b
= 0xbe0f; /* movsbl */
383 } else if ((ft
& VT_TYPE
) == (VT_BYTE
| VT_UNSIGNED
)) {
384 b
= 0xb60f; /* movzbl */
385 } else if ((ft
& VT_TYPE
) == VT_SHORT
) {
386 b
= 0xbf0f; /* movswl */
387 } else if ((ft
& VT_TYPE
) == (VT_SHORT
| VT_UNSIGNED
)) {
388 b
= 0xb70f; /* movzwl */
394 gen_modrm64(b
, r
, fr
, sv
->sym
, fc
);
397 gen_modrm(r
, fr
, sv
->sym
, fc
);
404 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
405 gen_addrpc32(fr
, sv
->sym
, fc
);
407 if (sv
->sym
->type
.t
& VT_STATIC
) {
409 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
410 gen_addrpc32(fr
, sv
->sym
, fc
);
413 o(0x05 + REG_VALUE(r
) * 8); /* mov xx(%rip), r */
414 gen_gotpcrel(r
, sv
->sym
, fc
);
417 } else if (is64_type(ft
)) {
418 orex(1,r
,0, 0xb8 + REG_VALUE(r
)); /* mov $xx, r */
421 orex(0,r
,0, 0xb8 + REG_VALUE(r
)); /* mov $xx, r */
424 } else if (v
== VT_LOCAL
) {
425 orex(1,0,r
,0x8d); /* lea xxx(%ebp), r */
426 gen_modrm(r
, VT_LOCAL
, sv
->sym
, fc
);
427 } else if (v
== VT_CMP
) {
429 if ((fc
& ~0x100) != TOK_NE
)
430 oad(0xb8 + REG_VALUE(r
), 0); /* mov $0, r */
432 oad(0xb8 + REG_VALUE(r
), 1); /* mov $1, r */
435 /* This was a float compare. If the parity bit is
436 set the result was unordered, meaning false for everything
437 except TOK_NE, and true for TOK_NE. */
439 o(0x037a + (REX_BASE(r
) << 8));
441 orex(0,r
,0, 0x0f); /* setxx %br */
443 o(0xc0 + REG_VALUE(r
));
444 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
447 oad(0xb8 + REG_VALUE(r
), t
); /* mov $1, r */
448 o(0x05eb + (REX_BASE(r
) << 8)); /* jmp after */
451 oad(0xb8 + REG_VALUE(r
), t
^ 1); /* mov $0, r */
453 if (r
== TREG_XMM0
) {
454 assert(v
== TREG_ST0
);
455 /* gen_cvt_ftof(VT_DOUBLE); */
456 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
457 /* movsd -0x10(%rsp),%xmm0 */
460 } else if (r
== TREG_ST0
) {
461 assert(v
== TREG_XMM0
);
462 /* gen_cvt_ftof(VT_LDOUBLE); */
463 /* movsd %xmm0,-0x10(%rsp) */
466 o(0xf02444dd); /* fldl -0x10(%rsp) */
469 o(0xc0 + REG_VALUE(r
) + REG_VALUE(v
) * 8); /* mov v, r */
475 /* store register 'r' in lvalue 'v' */
476 void store(int r
, SValue
*v
)
480 /* store the REX prefix in this variable when PIC is enabled */
485 v
= pe_getimport(v
, &v2
);
490 fr
= v
->r
& VT_VALMASK
;
493 #ifndef TCC_TARGET_PE
494 /* we need to access the variable via got */
495 if (fr
== VT_CONST
&& (v
->r
& VT_SYM
)) {
496 /* mov xx(%rip), %r11 */
498 gen_gotpcrel(TREG_R11
, v
->sym
, v
->c
.ul
);
499 pic
= is64_type(bt
) ? 0x49 : 0x41;
503 /* XXX: incorrect if float reg to reg */
504 if (bt
== VT_FLOAT
) {
507 o(0x7e0f); /* movd */
509 } else if (bt
== VT_DOUBLE
) {
512 o(0xd60f); /* movq */
514 } else if (bt
== VT_LDOUBLE
) {
515 o(0xc0d9); /* fld %st(0) */
523 if (bt
== VT_BYTE
|| bt
== VT_BOOL
)
525 else if (is64_type(bt
))
531 /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
536 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
537 gen_modrm64(op64
, r
, v
->r
, v
->sym
, fc
);
538 } else if (fr
!= r
) {
539 /* XXX: don't we really come here? */
541 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
544 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
545 gen_modrm(r
, v
->r
, v
->sym
, fc
);
546 } else if (fr
!= r
) {
547 /* XXX: don't we really come here? */
549 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
554 /* 'is_jmp' is '1' if it is a jump */
555 static void gcall_or_jmp(int is_jmp
)
558 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
560 if (vtop
->r
& VT_SYM
) {
561 /* relocation case */
562 greloc(cur_text_section
, vtop
->sym
,
563 ind
+ 1, R_X86_64_PC32
);
565 /* put an empty PC32 relocation */
566 put_elf_reloc(symtab_section
, cur_text_section
,
567 ind
+ 1, R_X86_64_PC32
, 0);
569 oad(0xe8 + is_jmp
, vtop
->c
.ul
- 4); /* call/jmp im */
571 /* otherwise, indirect call */
575 o(0xff); /* call/jmp *r */
576 o(0xd0 + REG_VALUE(r
) + (is_jmp
<< 4));
583 static const uint8_t arg_regs
[] = {
584 TREG_RCX
, TREG_RDX
, TREG_R8
, TREG_R9
587 static int func_scratch
;
589 /* Generate function call. The function address is pushed first, then
590 all the parameters in call order. This functions pops all the
591 parameters and the function address. */
/* Emit instruction 'b' addressing d(%rsp).  Bit 0x100 in 'r' means the
   instruction has no register operand (opcode-extension form). */
void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        /* 8-bit displacement */
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        /* 32-bit displacement */
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
605 void gfunc_call(int nb_args
)
607 int size
, align
, r
, args_size
, i
, d
, j
, bt
, struct_size
;
608 int nb_reg_args
, gen_reg
;
610 nb_reg_args
= nb_args
;
611 args_size
= (nb_reg_args
< REGN
? REGN
: nb_reg_args
) * PTR_SIZE
;
613 /* for struct arguments, we need to call memcpy and the function
614 call breaks register passing arguments we are preparing.
615 So, we process arguments which will be passed by stack first. */
616 struct_size
= args_size
;
617 for(i
= 0; i
< nb_args
; i
++) {
618 SValue
*sv
= &vtop
[-i
];
619 bt
= (sv
->type
.t
& VT_BTYPE
);
620 if (bt
== VT_STRUCT
) {
621 size
= type_size(&sv
->type
, &align
);
622 /* align to stack align size */
623 size
= (size
+ 15) & ~15;
624 /* generate structure store */
626 gen_offs_sp(0x8d, r
, struct_size
);
629 /* generate memcpy call */
630 vset(&sv
->type
, r
| VT_LVAL
, 0);
635 } else if (bt
== VT_LDOUBLE
) {
638 gen_offs_sp(0xdb, 0x107, struct_size
);
644 if (func_scratch
< struct_size
)
645 func_scratch
= struct_size
;
647 for (i
= 0; i
< REGN
; ++i
)
648 save_reg(arg_regs
[i
]);
651 gen_reg
= nb_reg_args
;
652 struct_size
= args_size
;
654 for(i
= 0; i
< nb_args
; i
++) {
655 bt
= (vtop
->type
.t
& VT_BTYPE
);
657 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
658 if (bt
== VT_LDOUBLE
)
661 size
= type_size(&vtop
->type
, &align
);
662 /* align to stack align size */
663 size
= (size
+ 15) & ~15;
667 gen_offs_sp(0x8d, d
, struct_size
);
668 gen_offs_sp(0x89, d
, j
*8);
671 gen_offs_sp(0x8d, d
, struct_size
);
675 } else if (is_sse_float(vtop
->type
.t
)) {
676 gv(RC_FLOAT
); /* only one float register */
679 /* movq %xmm0, j*8(%rsp) */
680 gen_offs_sp(0xd60f66, 0x100, j
*8);
682 /* movaps %xmm0, %xmmN */
686 /* mov %xmm0, %rxx */
689 o(0xc0 + REG_VALUE(d
));
695 gen_offs_sp(0x89, r
, j
*8);
699 gv(reg_classes
[d
] & ~RC_INT
);
704 o(0xc0 + REG_VALUE(d
) + REG_VALUE(r
) * 8);
718 #define FUNC_PROLOG_SIZE 11
720 /* generate function prolog of type 't' */
721 void gfunc_prolog(CType
*func_type
)
723 int addr
, reg_param_index
, bt
;
732 ind
+= FUNC_PROLOG_SIZE
;
733 func_sub_sp_offset
= ind
;
736 sym
= func_type
->ref
;
738 /* if the function returns a structure, then add an
739 implicit pointer parameter */
741 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
742 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
747 /* define parameters */
748 while ((sym
= sym
->next
) != NULL
) {
750 bt
= type
->t
& VT_BTYPE
;
751 if (reg_param_index
< REGN
) {
752 /* save arguments passed by register */
753 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
755 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
756 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
| VT_REF
, addr
);
758 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
, addr
);
764 while (reg_param_index
< REGN
) {
765 if (func_type
->ref
->c
== FUNC_ELLIPSIS
)
766 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
772 /* generate function epilog */
773 void gfunc_epilog(void)
778 if (func_ret_sub
== 0) {
783 g(func_ret_sub
>> 8);
787 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
788 /* align local size to word & save local variables */
789 v
= (func_scratch
+ -loc
+ 15) & -16;
792 Sym
*sym
= external_global_sym(TOK___chkstk
, &func_old_type
, 0);
793 oad(0xb8, v
); /* mov stacksize, %eax */
794 oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
795 greloc(cur_text_section
, sym
, ind
-4, R_X86_64_PC32
);
796 o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
798 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
799 o(0xec8148); /* sub rsp, stacksize */
803 cur_text_section
->data_offset
= saved_ind
;
804 pe_add_unwind_data(ind
, saved_ind
, v
);
805 ind
= cur_text_section
->data_offset
;
/* add 'val' to %rsp, using the short imm8 encoding when it fits */
static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348); /* add $imm8, %rsp */
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
821 static const uint8_t arg_regs
[REGN
] = {
822 TREG_RDI
, TREG_RSI
, TREG_RDX
, TREG_RCX
, TREG_R8
, TREG_R9
825 /* Generate function call. The function address is pushed first, then
826 all the parameters in call order. This functions pops all the
827 parameters and the function address. */
828 void gfunc_call(int nb_args
)
830 int size
, align
, r
, args_size
, i
;
833 int sse_reg
, gen_reg
;
835 /* calculate the number of integer/float arguments */
837 for(i
= 0; i
< nb_args
; i
++) {
838 if ((vtop
[-i
].type
.t
& VT_BTYPE
) == VT_STRUCT
) {
839 args_size
+= type_size(&vtop
[-i
].type
, &align
);
840 args_size
= (args_size
+ 7) & ~7;
841 } else if ((vtop
[-i
].type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
843 } else if (is_sse_float(vtop
[-i
].type
.t
)) {
845 if (nb_sse_args
> 8) args_size
+= 8;
848 if (nb_reg_args
> REGN
) args_size
+= 8;
852 /* for struct arguments, we need to call memcpy and the function
853 call breaks register passing arguments we are preparing.
854 So, we process arguments which will be passed by stack first. */
855 gen_reg
= nb_reg_args
;
856 sse_reg
= nb_sse_args
;
858 /* adjust stack to align SSE boundary */
859 if (args_size
&= 15) {
860 /* fetch cpu flag before the following sub will change the value */
861 if (vtop
>= vstack
&& (vtop
->r
& VT_VALMASK
) == VT_CMP
)
864 args_size
= 16 - args_size
;
866 oad(0xec81, args_size
); /* sub $xxx, %rsp */
869 for(i
= 0; i
< nb_args
; i
++) {
870 /* Swap argument to top, it will possibly be changed here,
871 and might use more temps. All arguments must remain on the
872 stack, so that get_reg can correctly evict some of them onto
873 stack. We could use also use a vrott(nb_args) at the end
874 of this loop, but this seems faster. */
875 SValue tmp
= vtop
[0];
878 if ((vtop
->type
.t
& VT_BTYPE
) == VT_STRUCT
) {
879 size
= type_size(&vtop
->type
, &align
);
880 /* align to stack align size */
881 size
= (size
+ 7) & ~7;
882 /* allocate the necessary size on stack */
884 oad(0xec81, size
); /* sub $xxx, %rsp */
885 /* generate structure store */
887 orex(1, r
, 0, 0x89); /* mov %rsp, r */
888 o(0xe0 + REG_VALUE(r
));
889 vset(&vtop
->type
, r
| VT_LVAL
, 0);
893 } else if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
896 oad(0xec8148, size
); /* sub $xxx, %rsp */
897 o(0x7cdb); /* fstpt 0(%rsp) */
901 } else if (is_sse_float(vtop
->type
.t
)) {
905 o(0x50); /* push $rax */
906 /* movq %xmm0, (%rsp) */
914 /* XXX: implicit cast ? */
917 orex(0,r
,0,0x50 + REG_VALUE(r
)); /* push r */
922 /* And swap the argument back to it's original position. */
928 /* XXX This should be superfluous. */
929 save_regs(0); /* save used temporary registers */
931 /* then, we prepare register passing arguments.
932 Note that we cannot set RDX and RCX in this loop because gv()
933 may break these temporary registers. Let's use R10 and R11
935 gen_reg
= nb_reg_args
;
936 sse_reg
= nb_sse_args
;
937 for(i
= 0; i
< nb_args
; i
++) {
938 if ((vtop
->type
.t
& VT_BTYPE
) == VT_STRUCT
||
939 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
940 } else if (is_sse_float(vtop
->type
.t
)) {
943 gv(RC_FLOAT
); /* only one float register */
944 /* movaps %xmm0, %xmmN */
946 o(0xc0 + (sse_reg
<< 3));
951 /* XXX: implicit cast ? */
955 if (j
== 2 || j
== 3)
956 /* j=2: r10, j=3: r11 */
958 orex(1,d
,r
,0x89); /* mov */
959 o(0xc0 + REG_VALUE(r
) * 8 + REG_VALUE(d
));
965 /* We shouldn't have many operands on the stack anymore, but the
966 call address itself is still there, and it might be in %eax
967 (or edx/ecx) currently, which the below writes would clobber.
968 So evict all remaining operands here. */
971 /* Copy R10 and R11 into RDX and RCX, respectively */
972 if (nb_reg_args
> 2) {
973 o(0xd2894c); /* mov %r10, %rdx */
974 if (nb_reg_args
> 3) {
975 o(0xd9894c); /* mov %r11, %rcx */
979 oad(0xb8, nb_sse_args
< 8 ? nb_sse_args
: 8); /* mov nb_sse_args, %eax */
987 #define FUNC_PROLOG_SIZE 11
989 static void push_arg_reg(int i
) {
991 gen_modrm64(0x89, arg_regs
[i
], VT_LOCAL
, NULL
, loc
);
994 /* generate function prolog of type 't' */
995 void gfunc_prolog(CType
*func_type
)
997 int i
, addr
, align
, size
;
998 int param_index
, param_addr
, reg_param_index
, sse_param_index
;
1002 sym
= func_type
->ref
;
1003 addr
= PTR_SIZE
* 2;
1005 ind
+= FUNC_PROLOG_SIZE
;
1006 func_sub_sp_offset
= ind
;
1009 if (func_type
->ref
->c
== FUNC_ELLIPSIS
) {
1010 int seen_reg_num
, seen_sse_num
, seen_stack_size
;
1011 seen_reg_num
= seen_sse_num
= 0;
1012 /* frame pointer and return address */
1013 seen_stack_size
= PTR_SIZE
* 2;
1014 /* count the number of seen parameters */
1015 sym
= func_type
->ref
;
1016 while ((sym
= sym
->next
) != NULL
) {
1018 if (is_sse_float(type
->t
)) {
1019 if (seen_sse_num
< 8) {
1022 seen_stack_size
+= 8;
1024 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
) {
1025 size
= type_size(type
, &align
);
1026 size
= (size
+ 7) & ~7;
1027 seen_stack_size
+= size
;
1028 } else if ((type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1029 seen_stack_size
+= LDOUBLE_SIZE
;
1031 if (seen_reg_num
< REGN
) {
1034 seen_stack_size
+= 8;
1040 /* movl $0x????????, -0x10(%rbp) */
1042 gen_le32(seen_reg_num
* 8);
1043 /* movl $0x????????, -0xc(%rbp) */
1045 gen_le32(seen_sse_num
* 16 + 48);
1046 /* movl $0x????????, -0x8(%rbp) */
1048 gen_le32(seen_stack_size
);
1050 /* save all register passing arguments */
1051 for (i
= 0; i
< 8; i
++) {
1053 o(0xd60f66); /* movq */
1054 gen_modrm(7 - i
, VT_LOCAL
, NULL
, loc
);
1055 /* movq $0, loc+8(%rbp) */
1060 for (i
= 0; i
< REGN
; i
++) {
1061 push_arg_reg(REGN
-1-i
);
1065 sym
= func_type
->ref
;
1067 reg_param_index
= 0;
1068 sse_param_index
= 0;
1070 /* if the function returns a structure, then add an
1071 implicit pointer parameter */
1072 func_vt
= sym
->type
;
1073 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
1074 push_arg_reg(reg_param_index
);
1081 /* define parameters */
1082 while ((sym
= sym
->next
) != NULL
) {
1084 size
= type_size(type
, &align
);
1085 size
= (size
+ 7) & ~7;
1086 if (is_sse_float(type
->t
)) {
1087 if (sse_param_index
< 8) {
1088 /* save arguments passed by register */
1090 o(0xd60f66); /* movq */
1091 gen_modrm(sse_param_index
, VT_LOCAL
, NULL
, loc
);
1099 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
||
1100 (type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1104 if (reg_param_index
< REGN
) {
1105 /* save arguments passed by register */
1106 push_arg_reg(reg_param_index
);
1114 sym_push(sym
->v
& ~SYM_FIELD
, type
,
1115 VT_LOCAL
| VT_LVAL
, param_addr
);
1120 /* generate function epilog */
1121 void gfunc_epilog(void)
1125 o(0xc9); /* leave */
1126 if (func_ret_sub
== 0) {
1129 o(0xc2); /* ret n */
1131 g(func_ret_sub
>> 8);
1133 /* align local size to word & save local variables */
1134 v
= (-loc
+ 15) & -16;
1136 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
1137 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1138 o(0xec8148); /* sub rsp, stacksize */
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}
1151 /* generate a jump to a fixed address */
1152 void gjmp_addr(int a
)
1160 oad(0xe9, a
- ind
- 5);
1164 /* generate a test. set 'inv' to invert test. Stack entry is popped */
1165 int gtst(int inv
, int t
)
1169 v
= vtop
->r
& VT_VALMASK
;
1171 /* fast case : can jump directly since flags are set */
1172 if (vtop
->c
.i
& 0x100)
1174 /* This was a float compare. If the parity flag is set
1175 the result was unordered. For anything except != this
1176 means false and we don't jump (anding both conditions).
1177 For != this means true (oring both).
1178 Take care about inverting the test. We need to jump
1179 to our target if the result was unordered and test wasn't NE,
1180 otherwise if unordered we don't want to jump. */
1181 vtop
->c
.i
&= ~0x100;
1182 if (!inv
== (vtop
->c
.i
!= TOK_NE
))
1183 o(0x067a); /* jp +6 */
1187 t
= psym(0x8a, t
); /* jp t */
1191 t
= psym((vtop
->c
.i
- 16) ^ inv
, t
);
1192 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
1193 /* && or || optimization */
1194 if ((v
& 1) == inv
) {
1195 /* insert vtop->c jump list in t */
1198 p
= (int *)(cur_text_section
->data
+ *p
);
1206 if (is_float(vtop
->type
.t
) ||
1207 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1211 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1212 /* constant jmp optimization */
1213 if ((vtop
->c
.i
!= 0) != inv
)
1218 o(0xc0 + REG_VALUE(v
) * 9);
1220 t
= psym(0x85 ^ inv
, t
);
1227 /* generate an integer binary operation */
1228 void gen_opi(int op
)
1233 ll
= is64_type(vtop
[-1].type
.t
);
1234 uu
= (vtop
[-1].type
.t
& VT_UNSIGNED
) != 0;
1235 cc
= (vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
;
1239 case TOK_ADDC1
: /* add with carry generation */
1242 if (cc
&& (!ll
|| (int)vtop
->c
.ll
== vtop
->c
.ll
)) {
1249 /* XXX: generate inc and dec for smaller code ? */
1250 orex(ll
, r
, 0, 0x83);
1251 o(0xc0 | (opc
<< 3) | REG_VALUE(r
));
1254 orex(ll
, r
, 0, 0x81);
1255 oad(0xc0 | (opc
<< 3) | REG_VALUE(r
), c
);
1258 gv2(RC_INT
, RC_INT
);
1261 orex(ll
, r
, fr
, (opc
<< 3) | 0x01);
1262 o(0xc0 + REG_VALUE(r
) + REG_VALUE(fr
) * 8);
1265 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1271 case TOK_SUBC1
: /* sub with carry generation */
1274 case TOK_ADDC2
: /* add with carry use */
1277 case TOK_SUBC2
: /* sub with carry use */
1290 gv2(RC_INT
, RC_INT
);
1293 orex(ll
, fr
, r
, 0xaf0f); /* imul fr, r */
1294 o(0xc0 + REG_VALUE(fr
) + REG_VALUE(r
) * 8);
1306 opc
= 0xc0 | (opc
<< 3);
1312 orex(ll
, r
, 0, 0xc1); /* shl/shr/sar $xxx, r */
1313 o(opc
| REG_VALUE(r
));
1314 g(vtop
->c
.i
& (ll
? 63 : 31));
1316 /* we generate the shift in ecx */
1317 gv2(RC_INT
, RC_RCX
);
1319 orex(ll
, r
, 0, 0xd3); /* shl/shr/sar %cl, r */
1320 o(opc
| REG_VALUE(r
));
1333 /* first operand must be in eax */
1334 /* XXX: need better constraint for second operand */
1335 gv2(RC_RAX
, RC_RCX
);
1340 orex(ll
, 0, 0, uu
? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
1341 orex(ll
, fr
, 0, 0xf7); /* div fr, %eax */
1342 o((uu
? 0xf0 : 0xf8) + REG_VALUE(fr
));
1343 if (op
== '%' || op
== TOK_UMOD
)
/* 64-bit integer operations: on x86-64 they share the 32-bit code path,
   which selects REX.W from the operand types */
void gen_opl(int op)
{
    gen_opi(op);
}
1360 /* generate a floating point operation 'v = t1 op t2' instruction. The
1361 two operands are guaranted to have the same floating point type */
1362 /* XXX: need to use ST1 too */
1363 void gen_opf(int op
)
1365 int a
, ft
, fc
, swapped
, r
;
1367 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
? RC_ST0
: RC_FLOAT
;
1369 /* convert constants to memory references */
1370 if ((vtop
[-1].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
1375 if ((vtop
[0].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
)
1378 /* must put at least one value in the floating point register */
1379 if ((vtop
[-1].r
& VT_LVAL
) &&
1380 (vtop
[0].r
& VT_LVAL
)) {
1386 /* swap the stack if needed so that t1 is the register and t2 is
1387 the memory reference */
1388 if (vtop
[-1].r
& VT_LVAL
) {
1392 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1393 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1394 /* load on stack second operand */
1395 load(TREG_ST0
, vtop
);
1396 save_reg(TREG_RAX
); /* eax is used by FP comparison code */
1397 if (op
== TOK_GE
|| op
== TOK_GT
)
1399 else if (op
== TOK_EQ
|| op
== TOK_NE
)
1402 o(0xc9d9); /* fxch %st(1) */
1403 o(0xe9da); /* fucompp */
1404 o(0xe0df); /* fnstsw %ax */
1406 o(0x45e480); /* and $0x45, %ah */
1407 o(0x40fC80); /* cmp $0x40, %ah */
1408 } else if (op
== TOK_NE
) {
1409 o(0x45e480); /* and $0x45, %ah */
1410 o(0x40f480); /* xor $0x40, %ah */
1412 } else if (op
== TOK_GE
|| op
== TOK_LE
) {
1413 o(0x05c4f6); /* test $0x05, %ah */
1416 o(0x45c4f6); /* test $0x45, %ah */
1423 /* no memory reference possible for long double operations */
1424 load(TREG_ST0
, vtop
);
1448 o(0xde); /* fxxxp %st, %st(1) */
1453 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1454 /* if saved lvalue, then we must reload it */
1457 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1459 r
= get_reg(RC_INT
);
1461 v1
.r
= VT_LOCAL
| VT_LVAL
;
1467 if (op
== TOK_EQ
|| op
== TOK_NE
) {
1470 if (op
== TOK_LE
|| op
== TOK_LT
)
1472 if (op
== TOK_LE
|| op
== TOK_GE
) {
1473 op
= 0x93; /* setae */
1475 op
= 0x97; /* seta */
1480 o(0x7e0ff3); /* movq */
1481 gen_modrm(1, r
, vtop
->sym
, fc
);
1483 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1486 o(0x2e0f); /* ucomisd %xmm0, %xmm1 */
1489 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1492 o(0x2e0f); /* ucomisd */
1493 gen_modrm(0, r
, vtop
->sym
, fc
);
1498 vtop
->c
.i
= op
| 0x100;
1500 /* no memory reference possible for long double operations */
1501 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1502 load(TREG_XMM0
, vtop
);
1522 if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
1523 o(0xde); /* fxxxp %st, %st(1) */
1526 /* if saved lvalue, then we must reload it */
1528 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1530 r
= get_reg(RC_INT
);
1532 v1
.r
= VT_LOCAL
| VT_LVAL
;
1538 /* movq %xmm0,%xmm1 */
1541 load(TREG_XMM0
, vtop
);
1542 /* subsd %xmm1,%xmm0 (f2 0f 5c c1) */
1543 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1552 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1559 gen_modrm(0, r
, vtop
->sym
, fc
);
1567 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
1568 and 'long long' cases. */
1569 void gen_cvt_itof(int t
)
1571 if ((t
& VT_BTYPE
) == VT_LDOUBLE
) {
1574 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1575 /* signed long long to float/double/long double (unsigned case
1576 is handled generically) */
1577 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1578 o(0x242cdf); /* fildll (%rsp) */
1579 o(0x08c48348); /* add $8, %rsp */
1580 } else if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1581 (VT_INT
| VT_UNSIGNED
)) {
1582 /* unsigned int to float/double/long double */
1583 o(0x6a); /* push $0 */
1585 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1586 o(0x242cdf); /* fildll (%rsp) */
1587 o(0x10c48348); /* add $16, %rsp */
1589 /* int to float/double/long double */
1590 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1591 o(0x2404db); /* fildl (%rsp) */
1592 o(0x08c48348); /* add $8, %rsp */
1596 save_reg(TREG_XMM0
);
1598 o(0xf2 + ((t
& VT_BTYPE
) == VT_FLOAT
));
1599 if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1600 (VT_INT
| VT_UNSIGNED
) ||
1601 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1605 o(0xc0 + (vtop
->r
& VT_VALMASK
)); /* cvtsi2sd */
1606 vtop
->r
= TREG_XMM0
;
1610 /* convert from one floating point type to another */
1611 void gen_cvt_ftof(int t
)
1619 if (bt
== VT_FLOAT
) {
1621 if (tbt
== VT_DOUBLE
) {
1622 o(0xc0140f); /* unpcklps */
1623 o(0xc05a0f); /* cvtps2pd */
1624 } else if (tbt
== VT_LDOUBLE
) {
1625 /* movss %xmm0,-0x10(%rsp) */
1628 o(0xf02444d9); /* flds -0x10(%rsp) */
1631 } else if (bt
== VT_DOUBLE
) {
1633 if (tbt
== VT_FLOAT
) {
1634 o(0xc0140f66); /* unpcklpd */
1635 o(0xc05a0f66); /* cvtpd2ps */
1636 } else if (tbt
== VT_LDOUBLE
) {
1637 /* movsd %xmm0,-0x10(%rsp) */
1640 o(0xf02444dd); /* fldl -0x10(%rsp) */
1645 if (tbt
== VT_DOUBLE
) {
1646 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
1647 /* movsd -0x10(%rsp),%xmm0 */
1650 vtop
->r
= TREG_XMM0
;
1651 } else if (tbt
== VT_FLOAT
) {
1652 o(0xf0245cd9); /* fstps -0x10(%rsp) */
1653 /* movss -0x10(%rsp),%xmm0 */
1656 vtop
->r
= TREG_XMM0
;
1661 /* convert fp to int 't' type */
1662 void gen_cvt_ftoi(int t
)
1664 int ft
, bt
, size
, r
;
1667 if (bt
== VT_LDOUBLE
) {
1668 gen_cvt_ftof(VT_DOUBLE
);
1678 r
= get_reg(RC_INT
);
1679 if (bt
== VT_FLOAT
) {
1681 } else if (bt
== VT_DOUBLE
) {
1686 orex(size
== 8, r
, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
1687 o(0xc0 + (REG_VALUE(r
) << 3));
1691 /* computed goto support */
1698 /* end of x86-64 code generator */
1699 /*************************************************************/
1700 #endif /* ! TARGET_DEFS_ONLY */
1701 /******************************************************/