2 * x86-64 code generator for TCC
4 * Copyright (c) 2008 Shinichiro Hamaji
6 * Based on i386-gen.c by Fabrice Bellard
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #ifdef TARGET_DEFS_ONLY
25 /* number of available registers */
29 /* a register can belong to several classes. The classes must be
30 sorted from more general to more precise (see gv2() code which does
31 assumptions on it). */
32 #define RC_INT 0x0001 /* generic integer register */
33 #define RC_FLOAT 0x0002 /* generic float register */
41 #define RC_XMM0 0x0020
42 #define RC_ST0 0x0040 /* only for long double */
43 #define RC_IRET RC_RAX /* function return: integer register */
44 #define RC_LRET RC_RDX /* function return: second integer register */
45 #define RC_FRET RC_XMM0 /* function return: float register */
47 /* pretty names for the registers */
66 #define REX_BASE(reg) (((reg) >> 3) & 1)
67 #define REG_VALUE(reg) ((reg) & 7)
69 /* return registers for function */
70 #define REG_IRET TREG_RAX /* single word int return register */
71 #define REG_LRET TREG_RDX /* second word return register (for long long) */
72 #define REG_FRET TREG_XMM0 /* float return register */
74 /* defined if function parameters must be evaluated in reverse order */
75 #define INVERT_FUNC_PARAMS
77 /* pointer size, in bytes */
80 /* long double size and alignment, in bytes */
81 #define LDOUBLE_SIZE 16
82 #define LDOUBLE_ALIGN 8
83 /* maximum alignment (for aligned attribute support) */
86 /******************************************************/
89 #define EM_TCC_TARGET EM_X86_64
91 /* relocation type for 32 bit data relocation */
92 #define R_DATA_32 R_X86_64_32
93 #define R_DATA_PTR R_X86_64_64
94 #define R_JMP_SLOT R_X86_64_JUMP_SLOT
95 #define R_COPY R_X86_64_COPY
97 #define ELF_START_ADDR 0x08048000
98 #define ELF_PAGE_SIZE 0x1000
100 /******************************************************/
101 #else /* ! TARGET_DEFS_ONLY */
102 /******************************************************/
106 ST_DATA
const int reg_classes
[NB_REGS
+7] = {
107 /* eax */ RC_INT
| RC_RAX
,
108 /* ecx */ RC_INT
| RC_RCX
,
109 /* edx */ RC_INT
| RC_RDX
,
110 /* xmm0 */ RC_FLOAT
| RC_XMM0
,
121 static unsigned long func_sub_sp_offset
;
122 static int func_ret_sub
;
124 /* XXX: make it faster ? */
129 if (ind1
> cur_text_section
->data_allocated
)
130 section_realloc(cur_text_section
, ind1
);
131 cur_text_section
->data
[ind
] = c
;
135 void o(unsigned int c
)
157 void gen_le64(int64_t c
)
169 void orex(int ll
, int r
, int r2
, int b
)
171 if ((r
& VT_VALMASK
) >= VT_CONST
)
173 if ((r2
& VT_VALMASK
) >= VT_CONST
)
175 if (ll
|| REX_BASE(r
) || REX_BASE(r2
))
176 o(0x40 | REX_BASE(r
) | (REX_BASE(r2
) << 2) | (ll
<< 3));
180 /* output a symbol and patch all calls to it */
181 void gsym_addr(int t
, int a
)
185 ptr
= (int *)(cur_text_section
->data
+ t
);
186 n
= *ptr
; /* next value */
197 /* psym is used to put an instruction with a data field which is a
198 reference to a symbol. It is in fact the same as oad ! */
201 static int is64_type(int t
)
203 return ((t
& VT_BTYPE
) == VT_PTR
||
204 (t
& VT_BTYPE
) == VT_FUNC
||
205 (t
& VT_BTYPE
) == VT_LLONG
);
208 static int is_sse_float(int t
) {
211 return bt
== VT_DOUBLE
|| bt
== VT_FLOAT
;
215 /* instruction + 4 bytes data. Return the address of the data */
216 ST_FUNC
int oad(int c
, int s
)
222 if (ind1
> cur_text_section
->data_allocated
)
223 section_realloc(cur_text_section
, ind1
);
224 *(int *)(cur_text_section
->data
+ ind
) = s
;
230 ST_FUNC
void gen_addr32(int r
, Sym
*sym
, int c
)
233 greloc(cur_text_section
, sym
, ind
, R_X86_64_32
);
237 /* output constant with relocation if 'r & VT_SYM' is true */
238 ST_FUNC
void gen_addr64(int r
, Sym
*sym
, int64_t c
)
241 greloc(cur_text_section
, sym
, ind
, R_X86_64_64
);
245 /* output constant with relocation if 'r & VT_SYM' is true */
246 ST_FUNC
void gen_addrpc32(int r
, Sym
*sym
, int c
)
249 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
253 /* output got address with relocation */
254 static void gen_gotpcrel(int r
, Sym
*sym
, int c
)
256 #ifndef TCC_TARGET_PE
259 greloc(cur_text_section
, sym
, ind
, R_X86_64_GOTPCREL
);
260 sr
= cur_text_section
->reloc
;
261 rel
= (ElfW(Rela
) *)(sr
->data
+ sr
->data_offset
- sizeof(ElfW(Rela
)));
264 printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym
->v
, NULL
), c
, r
,
265 cur_text_section
->data
[ind
-3],
266 cur_text_section
->data
[ind
-2],
267 cur_text_section
->data
[ind
-1]
269 greloc(cur_text_section
, sym
, ind
, R_X86_64_PC32
);
273 /* we use add c, %xxx for displacement */
275 o(0xc0 + REG_VALUE(r
));
280 static void gen_modrm_impl(int op_reg
, int r
, Sym
*sym
, int c
, int is_got
)
282 op_reg
= REG_VALUE(op_reg
) << 3;
283 if ((r
& VT_VALMASK
) == VT_CONST
) {
284 /* constant memory reference */
287 gen_gotpcrel(r
, sym
, c
);
289 gen_addrpc32(r
, sym
, c
);
291 } else if ((r
& VT_VALMASK
) == VT_LOCAL
) {
292 /* currently, we use only ebp as base */
294 /* short reference */
298 oad(0x85 | op_reg
, c
);
300 } else if ((r
& VT_VALMASK
) >= TREG_MEM
) {
302 g(0x80 | op_reg
| REG_VALUE(r
));
305 g(0x00 | op_reg
| REG_VALUE(r
));
308 g(0x00 | op_reg
| REG_VALUE(r
));
312 /* generate a modrm reference. 'op_reg' contains the additional 3
314 static void gen_modrm(int op_reg
, int r
, Sym
*sym
, int c
)
316 gen_modrm_impl(op_reg
, r
, sym
, c
, 0);
319 /* generate a modrm reference. 'op_reg' contains the additional 3
321 static void gen_modrm64(int opcode
, int op_reg
, int r
, Sym
*sym
, int c
)
324 is_got
= (op_reg
& TREG_MEM
) && !(sym
->type
.t
& VT_STATIC
);
325 orex(1, r
, op_reg
, opcode
);
326 gen_modrm_impl(op_reg
, r
, sym
, c
, is_got
);
330 /* load 'r' from value 'sv' */
331 void load(int r
, SValue
*sv
)
333 int v
, t
, ft
, fc
, fr
;
338 sv
= pe_getimport(sv
, &v2
);
345 #ifndef TCC_TARGET_PE
346 /* we use indirect access via got */
347 if ((fr
& VT_VALMASK
) == VT_CONST
&& (fr
& VT_SYM
) &&
348 (fr
& VT_LVAL
) && !(sv
->sym
->type
.t
& VT_STATIC
)) {
349 /* use the result register as a temporary register */
350 int tr
= r
| TREG_MEM
;
352 /* we cannot use float registers as a temporary register */
353 tr
= get_reg(RC_INT
) | TREG_MEM
;
355 gen_modrm64(0x8b, tr
, fr
, sv
->sym
, 0);
357 /* load from the temporary register */
365 if (v
== VT_LLOCAL
) {
367 v1
.r
= VT_LOCAL
| VT_LVAL
;
370 if (!(reg_classes
[fr
] & RC_INT
))
371 fr
= get_reg(RC_INT
);
375 if ((ft
& VT_BTYPE
) == VT_FLOAT
) {
376 b
= 0x6e0f66, r
= 0; /* movd */
377 } else if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
378 b
= 0x7e0ff3, r
= 0; /* movq */
379 } else if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
380 b
= 0xdb, r
= 5; /* fldt */
381 } else if ((ft
& VT_TYPE
) == VT_BYTE
) {
382 b
= 0xbe0f; /* movsbl */
383 } else if ((ft
& VT_TYPE
) == (VT_BYTE
| VT_UNSIGNED
)) {
384 b
= 0xb60f; /* movzbl */
385 } else if ((ft
& VT_TYPE
) == VT_SHORT
) {
386 b
= 0xbf0f; /* movswl */
387 } else if ((ft
& VT_TYPE
) == (VT_SHORT
| VT_UNSIGNED
)) {
388 b
= 0xb70f; /* movzwl */
394 gen_modrm64(b
, r
, fr
, sv
->sym
, fc
);
397 gen_modrm(r
, fr
, sv
->sym
, fc
);
404 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
405 gen_addrpc32(fr
, sv
->sym
, fc
);
407 if (sv
->sym
->type
.t
& VT_STATIC
) {
409 o(0x05 + REG_VALUE(r
) * 8); /* lea xx(%rip), r */
410 gen_addrpc32(fr
, sv
->sym
, fc
);
413 o(0x05 + REG_VALUE(r
) * 8); /* mov xx(%rip), r */
414 gen_gotpcrel(r
, sv
->sym
, fc
);
417 } else if (is64_type(ft
)) {
418 orex(1,r
,0, 0xb8 + REG_VALUE(r
)); /* mov $xx, r */
421 orex(0,r
,0, 0xb8 + REG_VALUE(r
)); /* mov $xx, r */
424 } else if (v
== VT_LOCAL
) {
425 orex(1,0,r
,0x8d); /* lea xxx(%ebp), r */
426 gen_modrm(r
, VT_LOCAL
, sv
->sym
, fc
);
427 } else if (v
== VT_CMP
) {
429 if ((fc
& ~0x100) != TOK_NE
)
430 oad(0xb8 + REG_VALUE(r
), 0); /* mov $0, r */
432 oad(0xb8 + REG_VALUE(r
), 1); /* mov $1, r */
435 /* This was a float compare. If the parity bit is
436 set the result was unordered, meaning false for everything
437 except TOK_NE, and true for TOK_NE. */
439 o(0x037a + (REX_BASE(r
) << 8));
441 orex(0,r
,0, 0x0f); /* setxx %br */
443 o(0xc0 + REG_VALUE(r
));
444 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
447 oad(0xb8 + REG_VALUE(r
), t
); /* mov $1, r */
448 o(0x05eb + (REX_BASE(r
) << 8)); /* jmp after */
451 oad(0xb8 + REG_VALUE(r
), t
^ 1); /* mov $0, r */
453 if (r
== TREG_XMM0
) {
454 assert(v
== TREG_ST0
);
455 /* gen_cvt_ftof(VT_DOUBLE); */
456 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
457 /* movsd -0x10(%rsp),%xmm0 */
460 } else if (r
== TREG_ST0
) {
461 assert(v
== TREG_XMM0
);
462 /* gen_cvt_ftof(VT_LDOUBLE); */
463 /* movsd %xmm0,-0x10(%rsp) */
466 o(0xf02444dd); /* fldl -0x10(%rsp) */
469 o(0xc0 + REG_VALUE(r
) + REG_VALUE(v
) * 8); /* mov v, r */
475 /* store register 'r' in lvalue 'v' */
476 void store(int r
, SValue
*v
)
480 /* store the REX prefix in this variable when PIC is enabled */
485 v
= pe_getimport(v
, &v2
);
490 fr
= v
->r
& VT_VALMASK
;
493 #ifndef TCC_TARGET_PE
494 /* we need to access the variable via got */
495 if (fr
== VT_CONST
&& (v
->r
& VT_SYM
)) {
496 /* mov xx(%rip), %r11 */
498 gen_gotpcrel(TREG_R11
, v
->sym
, v
->c
.ul
);
499 pic
= is64_type(bt
) ? 0x49 : 0x41;
503 /* XXX: incorrect if float reg to reg */
504 if (bt
== VT_FLOAT
) {
507 o(0x7e0f); /* movd */
509 } else if (bt
== VT_DOUBLE
) {
512 o(0xd60f); /* movq */
514 } else if (bt
== VT_LDOUBLE
) {
515 o(0xc0d9); /* fld %st(0) */
523 if (bt
== VT_BYTE
|| bt
== VT_BOOL
)
525 else if (is64_type(bt
))
531 /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
536 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
537 gen_modrm64(op64
, r
, v
->r
, v
->sym
, fc
);
538 } else if (fr
!= r
) {
539 /* XXX: don't we really come here? */
541 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
544 if (fr
== VT_CONST
|| fr
== VT_LOCAL
|| (v
->r
& VT_LVAL
)) {
545 gen_modrm(r
, v
->r
, v
->sym
, fc
);
546 } else if (fr
!= r
) {
547 /* XXX: don't we really come here? */
549 o(0xc0 + fr
+ r
* 8); /* mov r, fr */
554 /* 'is_jmp' is '1' if it is a jump */
555 static void gcall_or_jmp(int is_jmp
)
558 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
560 if (vtop
->r
& VT_SYM
) {
561 /* relocation case */
562 greloc(cur_text_section
, vtop
->sym
,
563 ind
+ 1, R_X86_64_PC32
);
565 /* put an empty PC32 relocation */
566 put_elf_reloc(symtab_section
, cur_text_section
,
567 ind
+ 1, R_X86_64_PC32
, 0);
569 oad(0xe8 + is_jmp
, vtop
->c
.ul
- 4); /* call/jmp im */
571 /* otherwise, indirect call */
575 o(0xff); /* call/jmp *r */
576 o(0xd0 + REG_VALUE(r
) + (is_jmp
<< 4));
583 static const uint8_t arg_regs
[] = {
584 TREG_RCX
, TREG_RDX
, TREG_R8
, TREG_R9
587 static int func_scratch
;
589 /* Generate function call. The function address is pushed first, then
590 all the parameters in call order. This function pops all the
591 parameters and the function address. */
593 void gen_offs_sp(int b
, int r
, int d
)
595 orex(1,0,r
& 0x100 ? 0 : r
, b
);
597 o(0x2444 | (REG_VALUE(r
) << 3));
600 o(0x2484 | (REG_VALUE(r
) << 3));
605 /* Return 1 if this function returns via an sret pointer, 0 otherwise */
606 ST_FUNC
int gfunc_sret(CType
*vt
, CType
*ret
, int *ret_align
) {
607 *ret_align
= 1; // Never have to re-align return values for x86-64
611 void gfunc_call(int nb_args
)
613 int size
, align
, r
, args_size
, i
, d
, j
, bt
, struct_size
;
614 int nb_reg_args
, gen_reg
;
616 nb_reg_args
= nb_args
;
617 args_size
= (nb_reg_args
< REGN
? REGN
: nb_reg_args
) * PTR_SIZE
;
619 /* for struct arguments, we need to call memcpy and the function
620 call breaks register passing arguments we are preparing.
621 So, we process arguments which will be passed by stack first. */
622 struct_size
= args_size
;
623 for(i
= 0; i
< nb_args
; i
++) {
624 SValue
*sv
= &vtop
[-i
];
625 bt
= (sv
->type
.t
& VT_BTYPE
);
626 if (bt
== VT_STRUCT
) {
627 size
= type_size(&sv
->type
, &align
);
628 /* align to stack align size */
629 size
= (size
+ 15) & ~15;
630 /* generate structure store */
632 gen_offs_sp(0x8d, r
, struct_size
);
635 /* generate memcpy call */
636 vset(&sv
->type
, r
| VT_LVAL
, 0);
641 } else if (bt
== VT_LDOUBLE
) {
644 gen_offs_sp(0xdb, 0x107, struct_size
);
650 if (func_scratch
< struct_size
)
651 func_scratch
= struct_size
;
653 for (i
= 0; i
< REGN
; ++i
)
654 save_reg(arg_regs
[i
]);
657 gen_reg
= nb_reg_args
;
658 struct_size
= args_size
;
660 for(i
= 0; i
< nb_args
; i
++) {
661 bt
= (vtop
->type
.t
& VT_BTYPE
);
663 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
664 if (bt
== VT_LDOUBLE
)
667 size
= type_size(&vtop
->type
, &align
);
668 /* align to stack align size */
669 size
= (size
+ 15) & ~15;
673 gen_offs_sp(0x8d, d
, struct_size
);
674 gen_offs_sp(0x89, d
, j
*8);
677 gen_offs_sp(0x8d, d
, struct_size
);
681 } else if (is_sse_float(vtop
->type
.t
)) {
682 gv(RC_FLOAT
); /* only one float register */
685 /* movq %xmm0, j*8(%rsp) */
686 gen_offs_sp(0xd60f66, 0x100, j
*8);
688 /* movaps %xmm0, %xmmN */
692 /* mov %xmm0, %rxx */
695 o(0xc0 + REG_VALUE(d
));
701 gen_offs_sp(0x89, r
, j
*8);
705 gv(reg_classes
[d
] & ~RC_INT
);
710 o(0xc0 + REG_VALUE(d
) + REG_VALUE(r
) * 8);
724 #define FUNC_PROLOG_SIZE 11
726 /* generate function prolog of type 't' */
727 void gfunc_prolog(CType
*func_type
)
729 int addr
, reg_param_index
, bt
;
738 ind
+= FUNC_PROLOG_SIZE
;
739 func_sub_sp_offset
= ind
;
742 sym
= func_type
->ref
;
744 /* if the function returns a structure, then add an
745 implicit pointer parameter */
747 if ((func_vt
.t
& VT_BTYPE
) == VT_STRUCT
) {
748 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
753 /* define parameters */
754 while ((sym
= sym
->next
) != NULL
) {
756 bt
= type
->t
& VT_BTYPE
;
757 if (reg_param_index
< REGN
) {
758 /* save arguments passed by register */
759 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
761 if (bt
== VT_STRUCT
|| bt
== VT_LDOUBLE
) {
762 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
| VT_REF
, addr
);
764 sym_push(sym
->v
& ~SYM_FIELD
, type
, VT_LOCAL
| VT_LVAL
, addr
);
770 while (reg_param_index
< REGN
) {
771 if (func_type
->ref
->c
== FUNC_ELLIPSIS
)
772 gen_modrm64(0x89, arg_regs
[reg_param_index
], VT_LOCAL
, NULL
, addr
);
778 /* generate function epilog */
779 void gfunc_epilog(void)
784 if (func_ret_sub
== 0) {
789 g(func_ret_sub
>> 8);
793 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
794 /* align local size to word & save local variables */
795 v
= (func_scratch
+ -loc
+ 15) & -16;
798 Sym
*sym
= external_global_sym(TOK___chkstk
, &func_old_type
, 0);
799 oad(0xb8, v
); /* mov stacksize, %eax */
800 oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
801 greloc(cur_text_section
, sym
, ind
-4, R_X86_64_PC32
);
802 o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
804 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
805 o(0xec8148); /* sub rsp, stacksize */
809 cur_text_section
->data_offset
= saved_ind
;
810 pe_add_unwind_data(ind
, saved_ind
, v
);
811 ind
= cur_text_section
->data_offset
;
816 static void gadd_sp(int val
)
818 if (val
== (char)val
) {
822 oad(0xc48148, val
); /* add $xxx, %rsp */
826 typedef enum X86_64_Mode
{
834 static X86_64_Mode
classify_x86_64_merge(X86_64_Mode a
, X86_64_Mode b
) {
837 else if (a
== x86_64_mode_none
)
839 else if (b
== x86_64_mode_none
)
841 else if ((a
== x86_64_mode_memory
) || (b
== x86_64_mode_memory
))
842 return x86_64_mode_memory
;
843 else if ((a
== x86_64_mode_integer
) || (b
== x86_64_mode_integer
))
844 return x86_64_mode_integer
;
845 else if ((a
== x86_64_mode_x87
) || (b
== x86_64_mode_x87
))
846 return x86_64_mode_memory
;
848 return x86_64_mode_sse
;
851 static X86_64_Mode
classify_x86_64_inner(CType
*ty
) {
855 if (ty
->t
& VT_BITFIELD
)
856 return x86_64_mode_memory
;
858 switch (ty
->t
& VT_BTYPE
) {
859 case VT_VOID
: return x86_64_mode_none
;
867 case VT_ENUM
: return x86_64_mode_integer
;
870 case VT_DOUBLE
: return x86_64_mode_sse
;
872 case VT_LDOUBLE
: return x86_64_mode_x87
;
878 if (f
->next
&& (f
->c
== f
->next
->c
))
879 return x86_64_mode_memory
;
881 mode
= x86_64_mode_none
;
882 for (; f
; f
= f
->next
)
883 mode
= classify_x86_64_merge(mode
, classify_x86_64_inner(&f
->type
));
889 static X86_64_Mode
classify_x86_64_arg(CType
*ty
, int *psize
, int *reg_count
) {
893 if (ty
->t
& VT_ARRAY
) {
896 return x86_64_mode_integer
;
899 size
= type_size(ty
, &align
);
900 size
= (size
+ 7) & ~7;
903 return x86_64_mode_memory
;
905 mode
= classify_x86_64_inner(ty
);
907 if (mode
== x86_64_mode_integer
)
908 *reg_count
= size
/ 8;
909 else if (mode
== x86_64_mode_none
)
917 static X86_64_Mode
classify_x86_64_arg_type(CType
*vt
, CType
*ret
, int *psize
, int *reg_count
) {
923 mode
= classify_x86_64_arg(vt
, &size
, reg_count
);
926 case x86_64_mode_integer
:
935 case x86_64_mode_x87
:
939 case x86_64_mode_sse
:
952 /* Return 1 if this function returns via an sret pointer, 0 otherwise */
953 int gfunc_sret(CType
*vt
, CType
*ret
, int *ret_align
) {
955 *ret_align
= 1; // Never have to re-align return values for x86-64
956 return (classify_x86_64_arg_type(vt
, ret
, &size
, ®_count
) == x86_64_mode_memory
);
960 static const uint8_t arg_regs
[REGN
] = {
961 TREG_RDI
, TREG_RSI
, TREG_RDX
, TREG_RCX
, TREG_R8
, TREG_R9
964 /* Generate function call. The function address is pushed first, then
965 all the parameters in call order. This function pops all the
966 parameters and the function address. */
967 void gfunc_call(int nb_args
)
971 int size
, align
, r
, args_size
, i
, j
, reg_count
;
974 int sse_reg
, gen_reg
;
976 /* calculate the number of integer/float arguments */
978 for(i
= 0; i
< nb_args
; i
++) {
979 mode
= classify_x86_64_arg(&vtop
[-i
].type
, &size
, ®_count
);
981 case x86_64_mode_memory
:
982 case x86_64_mode_x87
:
986 case x86_64_mode_sse
:
987 nb_sse_args
+= reg_count
;
988 if (nb_sse_args
> 8) args_size
+= size
;
991 case x86_64_mode_integer
:
992 nb_reg_args
+= reg_count
;
993 if (nb_reg_args
> REGN
) args_size
+= size
;
998 /* for struct arguments, we need to call memcpy and the function
999 call breaks register passing arguments we are preparing.
1000 So, we process arguments which will be passed by stack first. */
1001 gen_reg
= nb_reg_args
;
1002 sse_reg
= nb_sse_args
;
1004 /* adjust stack to align SSE boundary */
1005 if (args_size
&= 15) {
1006 /* fetch cpu flag before the following sub will change the value */
1007 if (vtop
>= vstack
&& (vtop
->r
& VT_VALMASK
) == VT_CMP
)
1010 args_size
= 16 - args_size
;
1012 oad(0xec81, args_size
); /* sub $xxx, %rsp */
1015 for(i
= 0; i
< nb_args
; i
++) {
1016 /* Swap argument to top, it will possibly be changed here,
1017 and might use more temps. All arguments must remain on the
1018 stack, so that get_reg can correctly evict some of them onto
1019 stack. We could also use a vrott(nb_args) at the end
1020 of this loop, but this seems faster. */
1021 SValue tmp
= vtop
[0];
1024 mode
= classify_x86_64_arg(&vtop
->type
, &size
, ®_count
);
1026 case x86_64_mode_memory
:
1027 /* allocate the necessary size on stack */
1029 oad(0xec81, size
); /* sub $xxx, %rsp */
1030 /* generate structure store */
1031 r
= get_reg(RC_INT
);
1032 orex(1, r
, 0, 0x89); /* mov %rsp, r */
1033 o(0xe0 + REG_VALUE(r
));
1034 vset(&vtop
->type
, r
| VT_LVAL
, 0);
1040 case x86_64_mode_x87
:
1042 size
= LDOUBLE_SIZE
;
1043 oad(0xec8148, size
); /* sub $xxx, %rsp */
1044 o(0x7cdb); /* fstpt 0(%rsp) */
1050 case x86_64_mode_sse
:
1053 o(0x50); /* push $rax */
1054 /* movq %xmm0, (%rsp) */
1059 sse_reg
-= reg_count
;
1062 case x86_64_mode_integer
:
1064 /* XXX: implicit cast ? */
1065 if (gen_reg
> REGN
) {
1067 orex(0,r
,0,0x50 + REG_VALUE(r
)); /* push r */
1070 gen_reg
-= reg_count
;
1074 /* And swap the argument back to it's original position. */
1080 /* XXX This should be superfluous. */
1081 save_regs(0); /* save used temporary registers */
1083 /* then, we prepare register passing arguments.
1084 Note that we cannot set RDX and RCX in this loop because gv()
1085 may break these temporary registers. Let's use R10 and R11
1087 gen_reg
= nb_reg_args
;
1088 sse_reg
= nb_sse_args
;
1089 for(i
= 0; i
< nb_args
; i
++) {
1090 mode
= classify_x86_64_arg_type(&vtop
->type
, &type
, &size
, ®_count
);
1091 /* Alter stack entry type so that gv() knows how to treat it */
1097 case x86_64_mode_sse
:
1099 sse_reg
-= reg_count
;
1101 for (j
= 0; j
< reg_count
; ++j
) {
1103 gv(RC_FLOAT
); /* only one float register */
1104 /* movaps %xmm0, %xmmN */
1106 o(0xc0 + (sse_reg
<< 3));
1111 case x86_64_mode_integer
:
1113 /* XXX: implicit cast ? */
1115 gen_reg
-= reg_count
;
1117 for (j
= 0; j
< reg_count
; ++j
) {
1119 int d
= arg_regs
[gen_reg
];
1121 if (gen_reg
== 2 || gen_reg
== 3)
1122 /* gen_reg=2: r10, gen_reg=3: r11 */
1124 orex(1,d
,r
,0x89); /* mov */
1125 o(0xc0 + REG_VALUE(r
) * 8 + REG_VALUE(d
));
1133 /* We shouldn't have many operands on the stack anymore, but the
1134 call address itself is still there, and it might be in %eax
1135 (or edx/ecx) currently, which the below writes would clobber.
1136 So evict all remaining operands here. */
1139 /* Copy R10 and R11 into RDX and RCX, respectively */
1140 if (nb_reg_args
> 2) {
1141 o(0xd2894c); /* mov %r10, %rdx */
1142 if (nb_reg_args
> 3) {
1143 o(0xd9894c); /* mov %r11, %rcx */
1147 oad(0xb8, nb_sse_args
< 8 ? nb_sse_args
: 8); /* mov nb_sse_args, %eax */
1155 #define FUNC_PROLOG_SIZE 11
1157 static void push_arg_reg(int i
) {
1159 gen_modrm64(0x89, arg_regs
[i
], VT_LOCAL
, NULL
, loc
);
1162 /* generate function prolog of type 't' */
1163 void gfunc_prolog(CType
*func_type
)
1166 int i
, addr
, align
, size
, reg_count
;
1167 int param_index
, param_addr
, reg_param_index
, sse_param_index
;
1171 sym
= func_type
->ref
;
1172 addr
= PTR_SIZE
* 2;
1174 ind
+= FUNC_PROLOG_SIZE
;
1175 func_sub_sp_offset
= ind
;
1178 if (func_type
->ref
->c
== FUNC_ELLIPSIS
) {
1179 int seen_reg_num
, seen_sse_num
, seen_stack_size
;
1180 seen_reg_num
= seen_sse_num
= 0;
1181 /* frame pointer and return address */
1182 seen_stack_size
= PTR_SIZE
* 2;
1183 /* count the number of seen parameters */
1184 sym
= func_type
->ref
;
1185 while ((sym
= sym
->next
) != NULL
) {
1187 if (is_sse_float(type
->t
)) {
1188 if (seen_sse_num
< 8) {
1191 seen_stack_size
+= 8;
1193 } else if ((type
->t
& VT_BTYPE
) == VT_STRUCT
) {
1194 size
= type_size(type
, &align
);
1195 size
= (size
+ 7) & ~7;
1196 seen_stack_size
+= size
;
1197 } else if ((type
->t
& VT_BTYPE
) == VT_LDOUBLE
) {
1198 seen_stack_size
+= LDOUBLE_SIZE
;
1200 if (seen_reg_num
< REGN
) {
1203 seen_stack_size
+= 8;
1209 /* movl $0x????????, -0x10(%rbp) */
1211 gen_le32(seen_reg_num
* 8);
1212 /* movl $0x????????, -0xc(%rbp) */
1214 gen_le32(seen_sse_num
* 16 + 48);
1215 /* movl $0x????????, -0x8(%rbp) */
1217 gen_le32(seen_stack_size
);
1219 /* save all register passing arguments */
1220 for (i
= 0; i
< 8; i
++) {
1222 o(0xd60f66); /* movq */
1223 gen_modrm(7 - i
, VT_LOCAL
, NULL
, loc
);
1224 /* movq $0, loc+8(%rbp) */
1229 for (i
= 0; i
< REGN
; i
++) {
1230 push_arg_reg(REGN
-1-i
);
1234 sym
= func_type
->ref
;
1236 reg_param_index
= 0;
1237 sse_param_index
= 0;
1239 /* if the function returns a structure, then add an
1240 implicit pointer parameter */
1241 func_vt
= sym
->type
;
1242 mode
= classify_x86_64_arg(&func_vt
, &size
, ®_count
);
1243 if (mode
== x86_64_mode_memory
) {
1244 push_arg_reg(reg_param_index
);
1251 /* define parameters */
1252 while ((sym
= sym
->next
) != NULL
) {
1254 mode
= classify_x86_64_arg(type
, &size
, ®_count
);
1256 case x86_64_mode_sse
:
1257 if (sse_param_index
+ reg_count
<= 8) {
1258 /* save arguments passed by register */
1259 for (i
= 0; i
< reg_count
; ++i
) {
1261 o(0xd60f66); /* movq */
1262 gen_modrm(sse_param_index
, VT_LOCAL
, NULL
, loc
);
1269 sse_param_index
+= reg_count
;
1273 case x86_64_mode_memory
:
1274 case x86_64_mode_x87
:
1279 case x86_64_mode_integer
: {
1280 if (reg_param_index
+ reg_count
<= REGN
) {
1281 /* save arguments passed by register */
1282 for (i
= 0; i
< reg_count
; ++i
) {
1283 push_arg_reg(reg_param_index
);
1290 reg_param_index
+= reg_count
;
1295 sym_push(sym
->v
& ~SYM_FIELD
, type
,
1296 VT_LOCAL
| VT_LVAL
, param_addr
);
1301 /* generate function epilog */
1302 void gfunc_epilog(void)
1306 o(0xc9); /* leave */
1307 if (func_ret_sub
== 0) {
1310 o(0xc2); /* ret n */
1312 g(func_ret_sub
>> 8);
1314 /* align local size to word & save local variables */
1315 v
= (-loc
+ 15) & -16;
1317 ind
= func_sub_sp_offset
- FUNC_PROLOG_SIZE
;
1318 o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
1319 o(0xec8148); /* sub rsp, stacksize */
1326 /* generate a jump to a label */
1329 return psym(0xe9, t
);
1332 /* generate a jump to a fixed address */
1333 void gjmp_addr(int a
)
1341 oad(0xe9, a
- ind
- 5);
1345 /* generate a test. set 'inv' to invert test. Stack entry is popped */
1346 int gtst(int inv
, int t
)
1350 v
= vtop
->r
& VT_VALMASK
;
1352 /* fast case : can jump directly since flags are set */
1353 if (vtop
->c
.i
& 0x100)
1355 /* This was a float compare. If the parity flag is set
1356 the result was unordered. For anything except != this
1357 means false and we don't jump (anding both conditions).
1358 For != this means true (oring both).
1359 Take care about inverting the test. We need to jump
1360 to our target if the result was unordered and test wasn't NE,
1361 otherwise if unordered we don't want to jump. */
1362 vtop
->c
.i
&= ~0x100;
1363 if (!inv
== (vtop
->c
.i
!= TOK_NE
))
1364 o(0x067a); /* jp +6 */
1368 t
= psym(0x8a, t
); /* jp t */
1372 t
= psym((vtop
->c
.i
- 16) ^ inv
, t
);
1373 } else if (v
== VT_JMP
|| v
== VT_JMPI
) {
1374 /* && or || optimization */
1375 if ((v
& 1) == inv
) {
1376 /* insert vtop->c jump list in t */
1379 p
= (int *)(cur_text_section
->data
+ *p
);
1387 if (is_float(vtop
->type
.t
) ||
1388 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1392 if ((vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
) {
1393 /* constant jmp optimization */
1394 if ((vtop
->c
.i
!= 0) != inv
)
1399 o(0xc0 + REG_VALUE(v
) * 9);
1401 t
= psym(0x85 ^ inv
, t
);
1408 /* generate an integer binary operation */
1409 void gen_opi(int op
)
1414 ll
= is64_type(vtop
[-1].type
.t
);
1415 uu
= (vtop
[-1].type
.t
& VT_UNSIGNED
) != 0;
1416 cc
= (vtop
->r
& (VT_VALMASK
| VT_LVAL
| VT_SYM
)) == VT_CONST
;
1420 case TOK_ADDC1
: /* add with carry generation */
1423 if (cc
&& (!ll
|| (int)vtop
->c
.ll
== vtop
->c
.ll
)) {
1430 /* XXX: generate inc and dec for smaller code ? */
1431 orex(ll
, r
, 0, 0x83);
1432 o(0xc0 | (opc
<< 3) | REG_VALUE(r
));
1435 orex(ll
, r
, 0, 0x81);
1436 oad(0xc0 | (opc
<< 3) | REG_VALUE(r
), c
);
1439 gv2(RC_INT
, RC_INT
);
1442 orex(ll
, r
, fr
, (opc
<< 3) | 0x01);
1443 o(0xc0 + REG_VALUE(r
) + REG_VALUE(fr
) * 8);
1446 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1452 case TOK_SUBC1
: /* sub with carry generation */
1455 case TOK_ADDC2
: /* add with carry use */
1458 case TOK_SUBC2
: /* sub with carry use */
1471 gv2(RC_INT
, RC_INT
);
1474 orex(ll
, fr
, r
, 0xaf0f); /* imul fr, r */
1475 o(0xc0 + REG_VALUE(fr
) + REG_VALUE(r
) * 8);
1487 opc
= 0xc0 | (opc
<< 3);
1493 orex(ll
, r
, 0, 0xc1); /* shl/shr/sar $xxx, r */
1494 o(opc
| REG_VALUE(r
));
1495 g(vtop
->c
.i
& (ll
? 63 : 31));
1497 /* we generate the shift in ecx */
1498 gv2(RC_INT
, RC_RCX
);
1500 orex(ll
, r
, 0, 0xd3); /* shl/shr/sar %cl, r */
1501 o(opc
| REG_VALUE(r
));
1514 /* first operand must be in eax */
1515 /* XXX: need better constraint for second operand */
1516 gv2(RC_RAX
, RC_RCX
);
1521 orex(ll
, 0, 0, uu
? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
1522 orex(ll
, fr
, 0, 0xf7); /* div fr, %eax */
1523 o((uu
? 0xf0 : 0xf8) + REG_VALUE(fr
));
1524 if (op
== '%' || op
== TOK_UMOD
)
1536 void gen_opl(int op
)
1541 /* generate a floating point operation 'v = t1 op t2' instruction. The
1542 two operands are guaranteed to have the same floating point type */
1543 /* XXX: need to use ST1 too */
1544 void gen_opf(int op
)
1546 int a
, ft
, fc
, swapped
, r
;
1548 (vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
? RC_ST0
: RC_FLOAT
;
1550 /* convert constants to memory references */
1551 if ((vtop
[-1].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
) {
1556 if ((vtop
[0].r
& (VT_VALMASK
| VT_LVAL
)) == VT_CONST
)
1559 /* must put at least one value in the floating point register */
1560 if ((vtop
[-1].r
& VT_LVAL
) &&
1561 (vtop
[0].r
& VT_LVAL
)) {
1567 /* swap the stack if needed so that t1 is the register and t2 is
1568 the memory reference */
1569 if (vtop
[-1].r
& VT_LVAL
) {
1573 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1574 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1575 /* load on stack second operand */
1576 load(TREG_ST0
, vtop
);
1577 save_reg(TREG_RAX
); /* eax is used by FP comparison code */
1578 if (op
== TOK_GE
|| op
== TOK_GT
)
1580 else if (op
== TOK_EQ
|| op
== TOK_NE
)
1583 o(0xc9d9); /* fxch %st(1) */
1584 o(0xe9da); /* fucompp */
1585 o(0xe0df); /* fnstsw %ax */
1587 o(0x45e480); /* and $0x45, %ah */
1588 o(0x40fC80); /* cmp $0x40, %ah */
1589 } else if (op
== TOK_NE
) {
1590 o(0x45e480); /* and $0x45, %ah */
1591 o(0x40f480); /* xor $0x40, %ah */
1593 } else if (op
== TOK_GE
|| op
== TOK_LE
) {
1594 o(0x05c4f6); /* test $0x05, %ah */
1597 o(0x45c4f6); /* test $0x45, %ah */
1604 /* no memory reference possible for long double operations */
1605 load(TREG_ST0
, vtop
);
1629 o(0xde); /* fxxxp %st, %st(1) */
1634 if (op
>= TOK_ULT
&& op
<= TOK_GT
) {
1635 /* if saved lvalue, then we must reload it */
1638 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1640 r
= get_reg(RC_INT
);
1642 v1
.r
= VT_LOCAL
| VT_LVAL
;
1648 if (op
== TOK_EQ
|| op
== TOK_NE
) {
1651 if (op
== TOK_LE
|| op
== TOK_LT
)
1653 if (op
== TOK_LE
|| op
== TOK_GE
) {
1654 op
= 0x93; /* setae */
1656 op
= 0x97; /* seta */
1661 o(0x7e0ff3); /* movq */
1662 gen_modrm(1, r
, vtop
->sym
, fc
);
1664 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1667 o(0x2e0f); /* ucomisd %xmm0, %xmm1 */
1670 if ((vtop
->type
.t
& VT_BTYPE
) == VT_DOUBLE
) {
1673 o(0x2e0f); /* ucomisd */
1674 gen_modrm(0, r
, vtop
->sym
, fc
);
1679 vtop
->c
.i
= op
| 0x100;
1681 /* no memory reference possible for long double operations */
1682 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LDOUBLE
) {
1683 load(TREG_XMM0
, vtop
);
1703 if ((ft
& VT_BTYPE
) == VT_LDOUBLE
) {
1704 o(0xde); /* fxxxp %st, %st(1) */
1707 /* if saved lvalue, then we must reload it */
1709 if ((r
& VT_VALMASK
) == VT_LLOCAL
) {
1711 r
= get_reg(RC_INT
);
1713 v1
.r
= VT_LOCAL
| VT_LVAL
;
1719 /* movq %xmm0,%xmm1 */
1722 load(TREG_XMM0
, vtop
);
1723 /* subsd %xmm1,%xmm0 (f2 0f 5c c1) */
1724 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1733 if ((ft
& VT_BTYPE
) == VT_DOUBLE
) {
1740 gen_modrm(0, r
, vtop
->sym
, fc
);
1748 /* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
1749 and 'long long' cases. */
1750 void gen_cvt_itof(int t
)
1752 if ((t
& VT_BTYPE
) == VT_LDOUBLE
) {
1755 if ((vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1756 /* signed long long to float/double/long double (unsigned case
1757 is handled generically) */
1758 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1759 o(0x242cdf); /* fildll (%rsp) */
1760 o(0x08c48348); /* add $8, %rsp */
1761 } else if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1762 (VT_INT
| VT_UNSIGNED
)) {
1763 /* unsigned int to float/double/long double */
1764 o(0x6a); /* push $0 */
1766 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1767 o(0x242cdf); /* fildll (%rsp) */
1768 o(0x10c48348); /* add $16, %rsp */
1770 /* int to float/double/long double */
1771 o(0x50 + (vtop
->r
& VT_VALMASK
)); /* push r */
1772 o(0x2404db); /* fildl (%rsp) */
1773 o(0x08c48348); /* add $8, %rsp */
1777 save_reg(TREG_XMM0
);
1779 o(0xf2 + ((t
& VT_BTYPE
) == VT_FLOAT
));
1780 if ((vtop
->type
.t
& (VT_BTYPE
| VT_UNSIGNED
)) ==
1781 (VT_INT
| VT_UNSIGNED
) ||
1782 (vtop
->type
.t
& VT_BTYPE
) == VT_LLONG
) {
1786 o(0xc0 + (vtop
->r
& VT_VALMASK
)); /* cvtsi2sd */
1787 vtop
->r
= TREG_XMM0
;
1791 /* convert from one floating point type to another */
1792 void gen_cvt_ftof(int t
)
1800 if (bt
== VT_FLOAT
) {
1802 if (tbt
== VT_DOUBLE
) {
1803 o(0xc0140f); /* unpcklps */
1804 o(0xc05a0f); /* cvtps2pd */
1805 } else if (tbt
== VT_LDOUBLE
) {
1806 /* movss %xmm0,-0x10(%rsp) */
1809 o(0xf02444d9); /* flds -0x10(%rsp) */
1812 } else if (bt
== VT_DOUBLE
) {
1814 if (tbt
== VT_FLOAT
) {
1815 o(0xc0140f66); /* unpcklpd */
1816 o(0xc05a0f66); /* cvtpd2ps */
1817 } else if (tbt
== VT_LDOUBLE
) {
1818 /* movsd %xmm0,-0x10(%rsp) */
1821 o(0xf02444dd); /* fldl -0x10(%rsp) */
1826 if (tbt
== VT_DOUBLE
) {
1827 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
1828 /* movsd -0x10(%rsp),%xmm0 */
1831 vtop
->r
= TREG_XMM0
;
1832 } else if (tbt
== VT_FLOAT
) {
1833 o(0xf0245cd9); /* fstps -0x10(%rsp) */
1834 /* movss -0x10(%rsp),%xmm0 */
1837 vtop
->r
= TREG_XMM0
;
1842 /* convert fp to int 't' type */
1843 void gen_cvt_ftoi(int t
)
1845 int ft
, bt
, size
, r
;
1848 if (bt
== VT_LDOUBLE
) {
1849 gen_cvt_ftof(VT_DOUBLE
);
1859 r
= get_reg(RC_INT
);
1860 if (bt
== VT_FLOAT
) {
1862 } else if (bt
== VT_DOUBLE
) {
1867 orex(size
== 8, r
, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
1868 o(0xc0 + (REG_VALUE(r
) << 3));
1872 /* computed goto support */
1879 /* end of x86-64 code generator */
1880 /*************************************************************/
1881 #endif /* ! TARGET_DEFS_ONLY */
1882 /******************************************************/