/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
23 #ifdef TARGET_DEFS_ONLY
25 /* number of available registers */
29 /* a register can belong to several classes. The classes must be
30 sorted from more general to more precise (see gv2() code which does
31 assumptions on it). */
32 #define RC_INT 0x0001 /* generic integer register */
33 #define RC_FLOAT 0x0002 /* generic float register */
39 #define RC_XMM0 0x0020
40 #define RC_ST0 0x0040 /* only for long double */
41 #define RC_IRET RC_RAX /* function return: integer register */
42 #define RC_LRET RC_RDX /* function return: second integer register */
43 #define RC_FRET RC_XMM0 /* function return: float register */
45 /* pretty names for the registers */
64 #define REX_BASE(reg) (((reg) >> 3) & 1)
65 #define REG_VALUE(reg) ((reg) & 7)
67 /* return registers for function */
68 #define REG_IRET TREG_RAX /* single word int return register */
69 #define REG_LRET TREG_RDX /* second word return register (for long long) */
70 #define REG_FRET TREG_XMM0 /* float return register */
72 /* defined if function parameters must be evaluated in reverse order */
73 #define INVERT_FUNC_PARAMS
75 /* pointer size, in bytes */
78 /* long double size and alignment, in bytes */
79 #define LDOUBLE_SIZE 16
80 #define LDOUBLE_ALIGN 8
81 /* maximum alignment (for aligned attribute support) */
84 ST_FUNC
void gen_opl(int op
);
85 ST_FUNC
void gen_le64(int64_t c
);
87 /******************************************************/
90 #define EM_TCC_TARGET EM_X86_64
92 /* relocation type for 32 bit data relocation */
93 #define R_DATA_32 R_X86_64_32
94 #define R_DATA_PTR R_X86_64_64
95 #define R_JMP_SLOT R_X86_64_JUMP_SLOT
96 #define R_COPY R_X86_64_COPY
98 #define ELF_START_ADDR 0x08048000
99 #define ELF_PAGE_SIZE 0x1000
101 /******************************************************/
102 #else /* ! TARGET_DEFS_ONLY */
103 /******************************************************/
107 ST_DATA
const int reg_classes
[NB_REGS
] = {
108 /* eax */ RC_INT
| RC_RAX
,
109 /* ecx */ RC_INT
| RC_RCX
,
110 /* edx */ RC_INT
| RC_RDX
,
111 /* xmm0 */ RC_FLOAT
| RC_XMM0
,
/* text-section offset just past the function prolog; gfunc_epilog()
   rewinds 'ind' here to patch the "sub $framesize, %rsp" */
static unsigned long func_sub_sp_offset;
/* argument bytes popped by "ret $n" on return (0 => plain "ret") */
static int func_ret_sub;
125 /* XXX: make it faster ? */
130 if (ind1
> cur_text_section
->data_allocated
)
131 section_realloc(cur_text_section
, ind1
);
132 cur_text_section
->data
[ind
] = c
;
136 void o(unsigned int c
)
158 void gen_le64(int64_t c
)
170 /* output a symbol and patch all calls to it */
171 void gsym_addr(int t
, int a
)
175 ptr
= (int *)(cur_text_section
->data
+ t
);
176 n
= *ptr
; /* next value */
187 /* psym is used to put an instruction with a data field which is a
188 reference to a symbol. It is in fact the same as oad ! */
191 static int is64_type(int t
)
193 return ((t
& VT_BTYPE
) == VT_PTR
||
194 (t
& VT_BTYPE
) == VT_FUNC
||
195 (t
& VT_BTYPE
) == VT_LLONG
);
198 static int is_sse_float(int t
) {
201 return bt
== VT_DOUBLE
|| bt
== VT_FLOAT
;
204 /* instruction + 4 bytes data. Return the address of the data */
205 ST_FUNC
int oad(int c
, int s
)
211 if (ind1
> cur_text_section
->data_allocated
)
212 section_realloc(cur_text_section
, ind1
);
213 *(int *)(cur_text_section
->data
+ ind
) = s
;
219 ST_FUNC
void gen_addr32(int r
, Sym
*sym
, int c
)
222 greloc(cur_text_section
, sym
, ind
, R_X86_64_32
);
226 /* output constant with relocation if 'r & VT_SYM' is true */
227 ST_FUNC
void gen_addr64(int r
, Sym
*sym
, int64_t c
)
230 greloc(cur_text_section
, sym
, ind
, R_X86_64_64
);
234 /* output constant with relocation if 'r & VT_SYM' is true */
235 ST_FUNC
void gen_addrpc32(int r
, Sym
*sym
, int c
)
238 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
242 /* output got address with relocation */
243 static void gen_gotpcrel(int r
, Sym
*sym
, int c
)
245 #ifndef TCC_TARGET_PE
248 greloc(cur_text_section
, sym
, ind
, R_X86_64_GOTPCREL
);
249 sr
= cur_text_section
->reloc
;
250 rel
= (ElfW(Rela
) *)(sr
->data
+ sr
->data_offset
- sizeof(ElfW(Rela
)));
253 printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym
->v
, NULL
), c
, r
,
254 cur_text_section
->data
[ind
-3],
255 cur_text_section
->data
[ind
-2],
256 cur_text_section
->data
[ind
-1]
258 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
263 /* we use add c, %xxx for displacement */
264 o(0x48 + REX_BASE(r
));
266 o(0xc0 + REG_VALUE(r
));
271 static void gen_modrm_impl(int op_reg
, int r
, Sym
*sym
, int c
, int is_got
)
273 op_reg
= REG_VALUE(op_reg
) << 3;
274 if ((r
& VT_VALMASK
) == VT_CONST
) {
275 /* constant memory reference */
278 gen_gotpcrel(r
, sym
, c
);
280 gen_addrpc32(r
, sym
, c
);
282 } else if ((r
& VT_VALMASK
) == VT_LOCAL
) {
283 /* currently, we use only ebp as base */
285 /* short reference */
289 oad(0x85 | op_reg
, c
);
291 } else if ((r
& VT_VALMASK
) >= TREG_MEM
) {
293 g(0x80 | op_reg
| REG_VALUE(r
));
296 g(0x00 | op_reg
| REG_VALUE(r
));
299 g(0x00 | op_reg
| REG_VALUE(r
));
303 /* generate a modrm reference. 'op_reg' contains the addtionnal 3
305 static void gen_modrm(int op_reg
, int r
, Sym
*sym
, int c
)
307 gen_modrm_impl(op_reg
, r
, sym
, c
, 0);
310 /* generate a modrm reference. 'op_reg' contains the addtionnal 3
312 static void gen_modrm64(int opcode
, int op_reg
, int r
, Sym
*sym
, int c
)
315 int rex
= 0x48 | (REX_BASE(op_reg
) << 2);
316 if ((r
& VT_VALMASK
) != VT_CONST
&&
317 (r
& VT_VALMASK
) != VT_LOCAL
) {
318 rex
|= REX_BASE(VT_VALMASK
& r
);
322 is_got
= (op_reg
& TREG_MEM
) && !(sym
->type
.t
& VT_STATIC
);
323 gen_modrm_impl(op_reg
, r
, sym
, c
, is_got
);
327 /* load 'r' from value 'sv' */
328 void load(int r
, SValue
*sv
)
330 int v
, t
, ft
, fc
, fr
;
334 if (pe_dllimport(r
, sv
, load
))
342 #ifndef TCC_TARGET_PE
343 /* we use indirect access via got */
344 if ((fr
& VT_VALMASK
) == VT_CONST
&& (fr
& VT_SYM
) &&
345 (fr
& VT_LVAL
) && !(sv
->sym
->type
.t
& VT_STATIC
)) {
346 /* use the result register as a temporal register */
347 int tr
= r
| TREG_MEM
;
349 /* we cannot use float registers as a temporal register */
350 tr
= get_reg(RC_INT
) | TREG_MEM
;
352 gen_modrm64(0x8b, tr
, fr
, sv
->sym
, 0);
354 /* load from the temporal register */
361 if (v
== VT_LLOCAL
) {
363 v1
.r
= VT_LOCAL
| VT_LVAL
;
368 if ((ft
& VT_BTYPE
) == VT_FLOAT
) {
369 o(0x6e0f66); /* movd */
371 } else if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
372 o(0x7e0ff3); /* movq */
374 } else if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
379 if ((ft
& VT_TYPE
) == VT_BYTE
) {
380 x
= 0xbe0f; /* movsbl */
381 } else if ((ft
& VT_TYPE
) == (VT_BYTE
| VT_UNSIGNED
)) {
382 x
= 0xb60f; /* movzbl */
383 } else if ((ft
& VT_TYPE
) == VT_SHORT
) {
384 x
= 0xbf0f; /* movswl */
385 } else if ((ft
& VT_TYPE
) == (VT_SHORT
| VT_UNSIGNED
)) {
386 x
= 0xb70f; /* movzwl */
387 } else if (is64_type(ft
)) {
388 gen_modrm64(0x8b, r
, fr
, sv
->sym
, fc
);
393 if (REX_BASE(r
) || (!(fr
& ~0x0f) && REX_BASE(fr
)))
394 o(0x40 + REX_BASE(fr
) + (REX_BASE(r
) << 2));
397 gen_modrm(r
, fr
, sv
->sym
, fc
);
402 o(0x8d48 + (REX_BASE(r
) << 2));
403 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
404 gen_addrpc32(fr
, sv
->sym
, fc
);
406 if (sv
->sym
->type
.t
& VT_STATIC
) {
407 o(0x8d48 + REX_BASE(r
));
408 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
409 gen_addrpc32(fr
, sv
->sym
, fc
);
411 o(0x8b48 + REX_BASE(r
));
412 o(0x05 + REG_VALUE(r
) * 8); /* mov xx(%rip), r */
413 gen_gotpcrel(r
, sv
->sym
, fc
);
416 } else if (is64_type(ft
)) {
417 o(0x48 + REX_BASE(r
));
418 o(0xb8 + REG_VALUE(r
)); /* mov $xx, r */
423 o(0xb8 + REG_VALUE(r
)); /* mov $xx, r */
426 } else if (v
== VT_LOCAL
) {
427 o(0x48 | (REX_BASE(r
) << 2));
428 o(0x8d); /* lea xxx(%ebp), r */
429 gen_modrm(r
, VT_LOCAL
, sv
->sym
, fc
);
430 } else if (v
== VT_CMP
) {
433 oad(0xb8 + REG_VALUE(r
), 0); /* mov $0, r */
436 o(0x0f); /* setxx %br */
438 o(0xc0 + REG_VALUE(r
));
439 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
443 oad(0xb8 + REG_VALUE(r
), t
); /* mov $1, r */
444 o(0x05eb + (REX_BASE(r
) << 8)); /* jmp after */
448 oad(0xb8 + REG_VALUE(r
), t
^ 1); /* mov $0, r */
450 if (r
== TREG_XMM0
) {
451 assert(v
== TREG_ST0
);
452 /* gen_cvt_ftof(VT_DOUBLE); */
453 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
454 /* movsd -0x10(%rsp),%xmm0 */
457 } else if (r
== TREG_ST0
) {
458 assert(v
== TREG_XMM0
);
459 /* gen_cvt_ftof(VT_LDOUBLE); */
460 /* movsd %xmm0,-0x10(%rsp) */
463 o(0xf02444dd); /* fldl -0x10(%rsp) */
465 o(0x48 | REX_BASE(r
) | (REX_BASE(v
) << 2));
467 o(0xc0 + REG_VALUE(r
) + REG_VALUE(v
) * 8); /* mov v, r */
473 /* store register 'r' in lvalue 'v' */
474 void store(int r
, SValue
*v
)
478 /* store the REX prefix in this variable when PIC is enabled */
482 if (pe_dllimport(r
, v
, store
))
488 fr
= v
->r
& VT_VALMASK
;
491 #ifndef TCC_TARGET_PE
492 /* we need to access the variable via got */
493 if (fr
== VT_CONST
&& (v
->r
& VT_SYM
)) {
494 /* mov xx(%rip), %r11 */
496 gen_gotpcrel(TREG_R11
, v
->sym
, v
->c
.ul
);
497 pic
= is64_type(bt
) ? 0x49 : 0x41;
501 /* XXX: incorrect if float reg to reg */
502 if (bt
== VT_FLOAT
) {
505 o(0x7e0f); /* movd */
507 } else if (bt
== VT_DOUBLE
) {
510 o(0xd60f); /* movq */
512 } else if (bt
== VT_LDOUBLE
) {
513 o(0xc0d9); /* fld %st(0) */
521 if (bt
== VT_BYTE
|| bt
== VT_BOOL
)
523 else if (is64_type(bt
))
529 /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
534 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
535 gen_modrm64(op64
, r
, v
->r
, v
->sym
, fc
);
536 } else if (fr
!= r
) {
537 /* XXX: don't we really come here? */
539 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
542 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
543 gen_modrm(r
, v
->r
, v
->sym
, fc
);
544 } else if (fr
!= r
) {
545 /* XXX: don't we really come here? */
547 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
552 /* 'is_jmp' is '1' if it is a jump */
553 static void gcall_or_jmp(int is_jmp
)
556 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
558 if (vtop
->r
& VT_SYM
) {
559 /* relocation case */
560 greloc(cur_text_section
, vtop
->sym
,
561 ind
+ 1, R_X86_64_PC32
);
563 /* put an empty PC32 relocation */
564 put_elf_reloc(symtab_section
, cur_text_section
,
565 ind
+ 1, R_X86_64_PC32
, 0);
567 oad(0xe8 + is_jmp
, vtop
->c
.ul
- 4); /* call/jmp im */
569 /* otherwise, indirect call */
573 o(0xff); /* call/jmp *r */
574 o(0xd0 + REG_VALUE(r
) + (is_jmp
<< 4));
581 static const uint8_t arg_regs
[] = {
582 TREG_RCX
, TREG_RDX
, TREG_R8
, TREG_R9
585 static int func_scratch
;
587 /* Generate function call. The function address is pushed first, then
588 all the parameters in call order. This functions pops all the
589 parameters and the function address. */
/* emit instruction pattern 'b' addressing d(%rsp) with register field
   'r'; chooses the disp8 (modrm 0x40 bit) or disp32 (modrm 0x80 bit)
   encoding depending on whether 'd' fits in a signed byte */
void gen_offs_sp(int b, int r, int d)
{
    if (d == (char)d) {
        o(b | 0x4000 | (r << 11));
        g(d);
    } else {
        o(b | 0x8000 | (r << 11));
        gen_le32(d);
    }
}
602 void gfunc_call(int nb_args
)
604 int size
, align
, r
, args_size
, i
, d
, j
, bt
;
605 int nb_reg_args
, gen_reg
;
607 /* calculate the number of integer/float arguments */
609 for(i
= 0; i
< nb_args
; i
++) {
610 bt
= (vtop
[-i
].type
.t
& VT_BTYPE
);
611 if (bt
!= VT_STRUCT
&& bt
!= VT_LDOUBLE
)
615 args_size
= (nb_reg_args
< REGN
? REGN
: nb_reg_args
) * PTR_SIZE
;
617 /* for struct arguments, we need to call memcpy and the function
618 call breaks register passing arguments we are preparing.
619 So, we process arguments which will be passed by stack first. */
620 for(i
= 0; i
< nb_args
; i
++) {
621 SValue
*sv
= &vtop
[-i
];
622 bt
= (sv
->type
.t
& VT_BTYPE
);
623 if (bt
== VT_STRUCT
) {
624 size
= type_size(&sv
->type
, &align
);
625 /* align to stack align size */
626 size
= (size
+ 15) & ~16;
627 /* generate structure store */
630 gen_offs_sp(0x24048d, r
, args_size
);
633 /* generate memcpy call */
634 vset(&sv
->type
, r
| VT_LVAL
, 0);
639 } else if (bt
== VT_LDOUBLE
) {
642 gen_offs_sp(0x243cdb, 0, args_size
);
648 if (func_scratch
< args_size
)
649 func_scratch
= args_size
;
651 for (i
= 0; i
< REGN
; ++i
)
652 save_reg(arg_regs
[i
]);
654 gen_reg
= nb_reg_args
;
655 for(i
= 0; i
< nb_args
; i
++) {
656 bt
= (vtop
->type
.t
& VT_BTYPE
);
657 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
659 } else if (is_sse_float(vtop
->type
.t
)) {
660 gv(RC_FLOAT
); /* only one float register */
664 /* movq %xmm0, j*8(%rsp) */
665 gen_offs_sp(0x2444d6, 0, j
*8);
667 /* movaps %xmm0, %xmmN */
671 /* mov %xmm0, %rxx */
673 o(0x7e0f48 + REX_BASE(d
));
674 o(0xc0 + REG_VALUE(d
));
681 gen_offs_sp(0x244489, r
, j
*8);
685 gv(reg_classes
[d
] & ~RC_INT
);
689 o(0x8948 + REX_BASE(d
));
690 o(0xc0 + r
*8 + REG_VALUE(d
));
705 #define FUNC_PROLOG_SIZE 11
707 /* generate function prolog of type 't' */
708 void gfunc_prolog(CType
*func_type
)
710 int addr
, align
, size
, reg_param_index
, bt
;
719 ind
+= FUNC_PROLOG_SIZE
;
720 func_sub_sp_offset
= ind
;
723 sym
= func_type
->ref
;
725 /* if the function returns a structure, then add an
726 implicit pointer parameter */
728 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
729 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
734 /* define parameters */
735 while ((sym
= sym
->next
) != NULL
) {
737 bt
= type
->t
& VT_BTYPE
;
738 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
)
740 if (reg_param_index
< REGN
) {
741 /* save arguments passed by register */
742 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
744 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
, addr
);
749 while (reg_param_index
< REGN
) {
750 if (func_type
->ref
->c
== FUNC_ELLIPSIS
)
751 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
756 sym
= func_type
->ref
;
757 while ((sym
= sym
->next
) != NULL
) {
759 bt
= type
->t
& VT_BTYPE
;
760 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
761 size
= type_size(type
, &align
);
762 size
= (size
+ 15) & -16;
763 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
, addr
);
769 /* generate function epilog */
770 void gfunc_epilog(void)
775 if (func_ret_sub
== 0) {
780 g(func_ret_sub
>> 8);
784 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
785 /* align local size to word & save local variables */
786 v
= (func_scratch
+ -loc
+ 15) & -16;
788 pe_add_unwind_data(ind
, saved_ind
, v
);
791 Sym
*sym
= external_global_sym(TOK___chkstk
, &func_old_type
, 0);
792 oad(0xb8, v
); /* mov stacksize, %eax */
793 oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
794 greloc(cur_text_section
, sym
, ind
-4, R_X86_64_PC32
);
795 o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
797 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
798 o(0xec8148); /* sub rsp, stacksize */
/* add 'val' to %rsp, using the short imm8 encoding when it fits */
static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);        /* add $imm8, %rsp */
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
817 static const uint8_t arg_regs
[REGN
] = {
818 TREG_RDI
, TREG_RSI
, TREG_RDX
, TREG_RCX
, TREG_R8
, TREG_R9
821 /* Generate function call. The function address is pushed first, then
822 all the parameters in call order. This functions pops all the
823 parameters and the function address. */
824 void gfunc_call(int nb_args
)
826 int size
, align
, r
, args_size
, i
;
830 int sse_reg
, gen_reg
;
832 /* calculate the number of integer/float arguments */
834 for(i
= 0; i
< nb_args
; i
++) {
835 if ((vtop
[-i
].type
.t
& VT_BTYPE
) == VT_STRUCT
) {
836 args_size
+= type_size(&vtop
->type
, &align
);
837 } else if ((vtop
[-i
].type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
839 } else if (is_sse_float(vtop
[-i
].type
.t
)) {
841 if (nb_sse_args
> 8) args_size
+= 8;
844 if (nb_reg_args
> REGN
) args_size
+= 8;
848 /* for struct arguments, we need to call memcpy and the function
849 call breaks register passing arguments we are preparing.
850 So, we process arguments which will be passed by stack first. */
852 gen_reg
= nb_reg_args
;
853 sse_reg
= nb_sse_args
;
855 /* adjust stack to align SSE boundary */
856 if (args_size
&= 8) {
857 o(0x50); /* push $rax */
859 for(i
= 0; i
< nb_args
; i
++) {
860 if ((vtop
->type
.t
& VT_BTYPE
) == VT_STRUCT
) {
861 size
= type_size(&vtop
->type
, &align
);
862 /* align to stack align size */
863 size
= (size
+ 3) & ~3;
864 /* allocate the necessary size on stack */
866 oad(0xec81, size
); /* sub $xxx, %rsp */
867 /* generate structure store */
869 o(0x48 + REX_BASE(r
));
870 o(0x89); /* mov %rsp, r */
873 /* following code breaks vtop[1] */
874 SValue tmp
= vtop
[1];
875 vset(&vtop
->type
, r
| VT_LVAL
, 0);
881 } else if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
884 oad(0xec8148, size
); /* sub $xxx, %rsp */
885 o(0x7cdb); /* fstpt 0(%rsp) */
889 } else if (is_sse_float(vtop
->type
.t
)) {
893 o(0x50); /* push $rax */
894 /* movq %xmm0, (%rsp) */
902 /* XXX: implicit cast ? */
905 o(0x50 + r
); /* push r */
913 /* then, we prepare register passing arguments.
914 Note that we cannot set RDX and RCX in this loop because gv()
915 may break these temporary registers. Let's use R10 and R11
917 gen_reg
= nb_reg_args
;
918 sse_reg
= nb_sse_args
;
919 for(i
= 0; i
< nb_args
; i
++) {
920 if ((vtop
->type
.t
& VT_BTYPE
) == VT_STRUCT
||
921 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
922 } else if (is_sse_float(vtop
->type
.t
)) {
925 gv(RC_FLOAT
); /* only one float register */
926 /* movaps %xmm0, %xmmN */
928 o(0xc0 + (sse_reg
<< 3));
933 /* XXX: implicit cast ? */
938 o(0xc0 + r
* 8 + arg_regs
[j
]);
941 /* j=2: r10, j=3: r11 */
945 /* j=4: r8, j=5: r9 */
946 o(0xc0 + r
* 8 + j
- 4);
953 save_regs(0); /* save used temporary registers */
955 /* Copy R10 and R11 into RDX and RCX, respectively */
956 if (nb_reg_args
> 2) {
957 o(0xd2894c); /* mov %r10, %rdx */
958 if (nb_reg_args
> 3) {
959 o(0xd9894c); /* mov %r11, %rcx */
963 oad(0xb8, nb_sse_args
< 8 ? nb_sse_args
: 8); /* mov nb_sse_args, %eax */
971 #define FUNC_PROLOG_SIZE 11
973 static void push_arg_reg(int i
) {
975 gen_modrm64(0x89, arg_regs
[i
], VT_LOCAL
, NULL
, loc
);
978 /* generate function prolog of type 't' */
979 void gfunc_prolog(CType
*func_type
)
981 int i
, addr
, align
, size
;
982 int param_index
, param_addr
, reg_param_index
, sse_param_index
;
986 sym
= func_type
->ref
;
989 ind
+= FUNC_PROLOG_SIZE
;
990 func_sub_sp_offset
= ind
;
993 if (func_type
->ref
->c
== FUNC_ELLIPSIS
) {
994 int seen_reg_num
, seen_sse_num
, seen_stack_size
;
995 seen_reg_num
= seen_sse_num
= 0;
996 /* frame pointer and return address */
997 seen_stack_size
= PTR_SIZE
* 2;
998 /* count the number of seen parameters */
999 sym
= func_type
->ref
;
1000 while ((sym
= sym
->next
) != NULL
) {
1002 if (is_sse_float(type
->t
)) {
1003 if (seen_sse_num
< 8) {
1006 seen_stack_size
+= 8;
1008 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
) {
1009 size
= type_size(type
, &align
);
1010 size
= (size
+ 3) & ~3;
1011 seen_stack_size
+= size
;
1012 } else if ((type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1013 seen_stack_size
+= LDOUBLE_SIZE
;
1015 if (seen_reg_num
< REGN
) {
1018 seen_stack_size
+= 8;
1024 /* movl $0x????????, -0x10(%rbp) */
1026 gen_le32(seen_reg_num
* 8);
1027 /* movl $0x????????, -0xc(%rbp) */
1029 gen_le32(seen_sse_num
* 16 + 48);
1030 /* movl $0x????????, -0x8(%rbp) */
1032 gen_le32(seen_stack_size
);
1034 /* save all register passing arguments */
1035 for (i
= 0; i
< 8; i
++) {
1037 o(0xd60f66); /* movq */
1038 gen_modrm(7 - i
, VT_LOCAL
, NULL
, loc
);
1039 /* movq $0, loc+8(%rbp) */
1044 for (i
= 0; i
< REGN
; i
++) {
1045 push_arg_reg(REGN
-1-i
);
1049 sym
= func_type
->ref
;
1051 reg_param_index
= 0;
1052 sse_param_index
= 0;
1054 /* if the function returns a structure, then add an
1055 implicit pointer parameter */
1056 func_vt
= sym
->type
;
1057 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
1058 push_arg_reg(reg_param_index
);
1065 /* define parameters */
1066 while ((sym
= sym
->next
) != NULL
) {
1068 size
= type_size(type
, &align
);
1069 size
= (size
+ 3) & ~3;
1070 if (is_sse_float(type
->t
)) {
1071 if (sse_param_index
< 8) {
1072 /* save arguments passed by register */
1074 o(0xd60f66); /* movq */
1075 gen_modrm(sse_param_index
, VT_LOCAL
, NULL
, loc
);
1083 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
||
1084 (type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1088 if (reg_param_index
< REGN
) {
1089 /* save arguments passed by register */
1090 push_arg_reg(reg_param_index
);
1098 sym_push(sym
->v
& ~SYM_FIELD
, type
,
1099 VT_LOCAL
| VT_LVAL
, param_addr
);
1104 /* generate function epilog */
1105 void gfunc_epilog(void)
1109 o(0xc9); /* leave */
1110 if (func_ret_sub
== 0) {
1113 o(0xc2); /* ret n */
1115 g(func_ret_sub
>> 8);
1117 /* align local size to word & save local variables */
1118 v
= (-loc
+ 15) & -16;
1120 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
1121 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1122 o(0xec8148); /* sub rsp, stacksize */
/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}
1135 /* generate a jump to a fixed address */
1136 void gjmp_addr(int a
)
1144 oad(0xe9, a
- ind
- 5);
1148 /* generate a test. set 'inv' to invert test. Stack entry is popped */
1149 int gtst(int inv
, int t
)
1153 v
= vtop
->r
& VT_VALMASK
;
1155 /* fast case : can jump directly since flags are set */
1157 t
= psym((vtop
->c
.i
- 16) ^ inv
, t
);
1158 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
1159 /* && or || optimization */
1160 if ((v
& 1) == inv
) {
1161 /* insert vtop->c jump list in t */
1164 p
= (int *)(cur_text_section
->data
+ *p
);
1172 if (is_float(vtop
->type
.t
) ||
1173 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1177 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1178 /* constant jmp optimization */
1179 if ((vtop
->c
.i
!= 0) != inv
)
1186 t
= psym(0x85 ^ inv
, t
);
1193 /* generate an integer binary operation */
1194 void gen_opi(int op
)
1200 case TOK_ADDC1
: /* add with carry generation */
1203 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
&&
1204 !is64_type(vtop
->type
.t
)) {
1208 if (is64_type(vtop
->type
.t
)) {
1209 o(0x48 | REX_BASE(r
));
1214 /* XXX: generate inc and dec for smaller code ? */
1216 o(0xc0 | (opc
<< 3) | REG_VALUE(r
));
1220 oad(0xc0 | (opc
<< 3) | REG_VALUE(r
), c
);
1223 gv2(RC_INT
, RC_INT
);
1227 is64_type(vtop
[0].type
.t
) || (vtop
[0].type
.t
& VT_UNSIGNED
) ||
1228 is64_type(vtop
[-1].type
.t
) || (vtop
[-1].type
.t
& VT_UNSIGNED
)) {
1229 o(0x48 | REX_BASE(r
) | (REX_BASE(fr
) << 2));
1231 o((opc
<< 3) | 0x01);
1232 o(0xc0 + REG_VALUE(r
) + REG_VALUE(fr
) * 8);
1235 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1241 case TOK_SUBC1
: /* sub with carry generation */
1244 case TOK_ADDC2
: /* add with carry use */
1247 case TOK_SUBC2
: /* sub with carry use */
1260 gv2(RC_INT
, RC_INT
);
1263 if (is64_type(vtop
[0].type
.t
) || (vtop
[0].type
.t
& VT_UNSIGNED
) ||
1264 is64_type(vtop
[-1].type
.t
) || (vtop
[-1].type
.t
& VT_UNSIGNED
)) {
1265 o(0x48 | REX_BASE(fr
) | (REX_BASE(r
) << 2));
1268 o(0xaf0f); /* imul fr, r */
1269 o(0xc0 + fr
+ r
* 8);
1280 opc
= 0xc0 | (opc
<< 3);
1281 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1285 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1286 o(0x48 | REX_BASE(r
));
1293 o(0xc1); /* shl/shr/sar $xxx, r */
1297 /* we generate the shift in ecx */
1298 gv2(RC_INT
, RC_RCX
);
1300 if ((vtop
[-1].type
.t
& VT_BTYPE
) == VT_LLONG
) {
1301 o(0x48 | REX_BASE(r
));
1303 o(0xd3); /* shl/shr/sar %cl, r */
1314 /* first operand must be in eax */
1315 /* XXX: need better constraint for second operand */
1316 gv2(RC_RAX
, RC_RCX
);
1321 if (op
== TOK_UMULL
) {
1322 o(0xf7); /* mul fr */
1324 vtop
->r2
= TREG_RDX
;
1327 if (op
== TOK_UDIV
|| op
== TOK_UMOD
) {
1328 if ((vtop
->type
.t
& VT_BTYPE
) & VT_LLONG
) {
1329 o(0xd23148); /* xor %rdx, %rdx */
1330 o(0x48 + REX_BASE(fr
));
1332 o(0xd231); /* xor %edx, %edx */
1334 o(0xf7); /* div fr, %eax */
1337 if ((vtop
->type
.t
& VT_BTYPE
) & VT_LLONG
) {
1338 o(0x9948); /* cqto */
1339 o(0x48 + REX_BASE(fr
));
1343 o(0xf7); /* idiv fr, %eax */
1346 if (op
== '%' || op
== TOK_UMOD
)
/* 64 bit operations are handled directly by gen_opi() on x86-64 */
void gen_opl(int op)
{
    gen_opi(op);
}
1364 /* generate a floating point operation 'v = t1 op t2' instruction. The
1365 two operands are guaranted to have the same floating point type */
1366 /* XXX: need to use ST1 too */
1367 void gen_opf(int op
)
1369 int a
, ft
, fc
, swapped
, r
;
1371 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
? RC_ST0
: RC_FLOAT
;
1373 /* convert constants to memory references */
1374 if ((vtop
[-1].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
1379 if ((vtop
[0].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
)
1382 /* must put at least one value in the floating point register */
1383 if ((vtop
[-1].r
& VT_LVAL
) &&
1384 (vtop
[0].r
& VT_LVAL
)) {
1390 /* swap the stack if needed so that t1 is the register and t2 is
1391 the memory reference */
1392 if (vtop
[-1].r
& VT_LVAL
) {
1396 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1397 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1398 /* load on stack second operand */
1399 load(TREG_ST0
, vtop
);
1400 save_reg(TREG_RAX
); /* eax is used by FP comparison code */
1401 if (op
== TOK_GE
|| op
== TOK_GT
)
1403 else if (op
== TOK_EQ
|| op
== TOK_NE
)
1406 o(0xc9d9); /* fxch %st(1) */
1407 o(0xe9da); /* fucompp */
1408 o(0xe0df); /* fnstsw %ax */
1410 o(0x45e480); /* and $0x45, %ah */
1411 o(0x40fC80); /* cmp $0x40, %ah */
1412 } else if (op
== TOK_NE
) {
1413 o(0x45e480); /* and $0x45, %ah */
1414 o(0x40f480); /* xor $0x40, %ah */
1416 } else if (op
== TOK_GE
|| op
== TOK_LE
) {
1417 o(0x05c4f6); /* test $0x05, %ah */
1420 o(0x45c4f6); /* test $0x45, %ah */
1427 /* no memory reference possible for long double operations */
1428 load(TREG_ST0
, vtop
);
1452 o(0xde); /* fxxxp %st, %st(1) */
1457 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1458 /* if saved lvalue, then we must reload it */
1461 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1463 r
= get_reg(RC_INT
);
1465 v1
.r
= VT_LOCAL
| VT_LVAL
;
1471 if (op
== TOK_EQ
|| op
== TOK_NE
) {
1474 if (op
== TOK_LE
|| op
== TOK_LT
)
1476 if (op
== TOK_LE
|| op
== TOK_GE
) {
1477 op
= 0x93; /* setae */
1479 op
= 0x97; /* seta */
1484 o(0x7e0ff3); /* movq */
1485 gen_modrm(1, r
, vtop
->sym
, fc
);
1487 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1490 o(0x2e0f); /* ucomisd %xmm0, %xmm1 */
1493 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1496 o(0x2e0f); /* ucomisd */
1497 gen_modrm(0, r
, vtop
->sym
, fc
);
1504 /* no memory reference possible for long double operations */
1505 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1506 load(TREG_XMM0
, vtop
);
1526 if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
1527 o(0xde); /* fxxxp %st, %st(1) */
1530 /* if saved lvalue, then we must reload it */
1532 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1534 r
= get_reg(RC_INT
);
1536 v1
.r
= VT_LOCAL
| VT_LVAL
;
1542 /* movq %xmm0,%xmm1 */
1545 load(TREG_XMM0
, vtop
);
1546 /* subsd %xmm1,%xmm0 (f2 0f 5c c1) */
1547 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1556 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1563 gen_modrm(0, r
, vtop
->sym
, fc
);
1571 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
1572 and 'long long' cases. */
1573 void gen_cvt_itof(int t
)
1575 if ((t
& VT_BTYPE
) == VT_LDOUBLE
) {
1578 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1579 /* signed long long to float/double/long double (unsigned case
1580 is handled generically) */
1581 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1582 o(0x242cdf); /* fildll (%rsp) */
1583 o(0x08c48348); /* add $8, %rsp */
1584 } else if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1585 (VT_INT
| VT_UNSIGNED
)) {
1586 /* unsigned int to float/double/long double */
1587 o(0x6a); /* push $0 */
1589 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1590 o(0x242cdf); /* fildll (%rsp) */
1591 o(0x10c48348); /* add $16, %rsp */
1593 /* int to float/double/long double */
1594 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1595 o(0x2404db); /* fildl (%rsp) */
1596 o(0x08c48348); /* add $8, %rsp */
1600 save_reg(TREG_XMM0
);
1602 o(0xf2 + ((t
& VT_BTYPE
) == VT_FLOAT
));
1603 if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1604 (VT_INT
| VT_UNSIGNED
) ||
1605 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1609 o(0xc0 + (vtop
->r
& VT_VALMASK
)); /* cvtsi2sd */
1610 vtop
->r
= TREG_XMM0
;
1614 /* convert from one floating point type to another */
1615 void gen_cvt_ftof(int t
)
1623 if (bt
== VT_FLOAT
) {
1625 if (tbt
== VT_DOUBLE
) {
1626 o(0xc0140f); /* unpcklps */
1627 o(0xc05a0f); /* cvtps2pd */
1628 } else if (tbt
== VT_LDOUBLE
) {
1629 /* movss %xmm0,-0x10(%rsp) */
1632 o(0xf02444d9); /* flds -0x10(%rsp) */
1635 } else if (bt
== VT_DOUBLE
) {
1637 if (tbt
== VT_FLOAT
) {
1638 o(0xc0140f66); /* unpcklpd */
1639 o(0xc05a0f66); /* cvtpd2ps */
1640 } else if (tbt
== VT_LDOUBLE
) {
1641 /* movsd %xmm0,-0x10(%rsp) */
1644 o(0xf02444dd); /* fldl -0x10(%rsp) */
1649 if (tbt
== VT_DOUBLE
) {
1650 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
1651 /* movsd -0x10(%rsp),%xmm0 */
1654 vtop
->r
= TREG_XMM0
;
1655 } else if (tbt
== VT_FLOAT
) {
1656 o(0xf0245cd9); /* fstps -0x10(%rsp) */
1657 /* movss -0x10(%rsp),%xmm0 */
1660 vtop
->r
= TREG_XMM0
;
1665 /* convert fp to int 't' type */
1666 void gen_cvt_ftoi(int t
)
1668 int ft
, bt
, size
, r
;
1671 if (bt
== VT_LDOUBLE
) {
1672 gen_cvt_ftof(VT_DOUBLE
);
1682 r
= get_reg(RC_INT
);
1683 if (bt
== VT_FLOAT
) {
1685 } else if (bt
== VT_DOUBLE
) {
1691 o(0x48 + REX_BASE(r
));
1693 o(0x2c0f); /* cvttss2si or cvttsd2si */
1694 o(0xc0 + (REG_VALUE(r
) << 3));
1698 /* computed goto support */
1705 /* end of x86-64 code generator */
1706 /*************************************************************/
1707 #endif /* ! TARGET_DEFS_ONLY */
1708 /******************************************************/