/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifdef TARGET_DEFS_ONLY

/* number of available registers */
/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT     0x0001 /* generic integer register */
#define RC_FLOAT   0x0002 /* generic float register */
#define RC_ST0     0x0080 /* only for long double */
#define RC_XMM0    0x1000
#define RC_XMM1    0x2000
#define RC_XMM2    0x4000
#define RC_XMM3    0x8000
#define RC_XMM4    0x10000
#define RC_XMM5    0x20000
#define RC_XMM6    0x40000
#define RC_XMM7    0x80000
#define RC_IRET    RC_RAX  /* function return: integer register */
#define RC_LRET    RC_RDX  /* function return: second integer register */
#define RC_FRET    RC_XMM0 /* function return: float register */
#define RC_QRET    RC_XMM1 /* function return: second float register */
/* pretty names for the registers */

#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)
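/* x86-64 numbers its 16 registers with 4 bits: REG_VALUE yields the
   low 3 bits that fit in a ModRM or opcode field, REX_BASE the high
   bit that must travel in a REX prefix. E.g. r9 (register number 9)
   gives REG_VALUE 1 and REX_BASE 1. */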
/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS
/* pointer size, in bytes */

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */

/******************************************************/

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32   R_X86_64_32
#define R_DATA_PTR  R_X86_64_64
#define R_JMP_SLOT  R_X86_64_JUMP_SLOT
#define R_COPY      R_X86_64_COPY

#define ELF_START_ADDR 0x08048000
#define ELF_PAGE_SIZE  0x1000
/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
void o(unsigned int c)

void gen_le64(int64_t c)

void orex(int ll, int r, int r2, int b)
    if ((r & VT_VALMASK) >= VT_CONST)
    if ((r2 & VT_VALMASK) >= VT_CONST)
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
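    /* The byte built above is the REX prefix, layout 0100WRXB: W (from
       ll) selects 64-bit operand size, R (from r2) extends the ModRM
       reg field, and B (from r) extends the ModRM r/m field. For
       example, orex(1, TREG_RAX, TREG_RCX, 0x89) emits 0x48 0x89, the
       start of a 64-bit mov %rcx, %rax. */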
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
    ptr = (int *)(cur_text_section->data + t);
    n = *ptr; /* next value */
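    /* Unresolved forward jumps to the same label form a chain threaded
       through their own 32-bit displacement fields: each field holds
       the offset of the next unresolved reference (0 terminates the
       chain), which is what the walk over 'ptr' and 'n' follows while
       patching every pending jump with the now-known target. */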
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}
/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(int *)(cur_text_section->data + ind) = s;
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
    greloc(cur_text_section, sym, ind, R_X86_64_32);

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
    greloc(cur_text_section, sym, ind, R_X86_64_64);

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
#ifndef TCC_TARGET_PE
    greloc(cur_text_section, sym, ind, R_X86_64_GOTPCREL);
    sr = cur_text_section->reloc;
    rel = (ElfW(Rela) *)(sr->data + sr->data_offset - sizeof(ElfW(Rela)));

    printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym->v, NULL), c, r,
           cur_text_section->data[ind-3],
           cur_text_section->data[ind-2],
           cur_text_section->data[ind-1]
           );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);

        /* we use add c, %xxx for displacement */
        o(0xc0 + REG_VALUE(r));
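    /* For position-independent ELF output the symbol's address is not
       known here, so it is fetched through a RIP-relative GOT slot
       (R_X86_64_GOTPCREL); a nonzero offset c cannot be folded into
       that load and is applied afterwards with an add-immediate whose
       ModRM byte (0xc0 + reg) is emitted above. */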
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
            gen_gotpcrel(r, sym, c);
            gen_addrpc32(r, sym, c);
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
            /* short reference */
            oad(0x85 | op_reg, c);
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
            g(0x80 | op_reg | REG_VALUE(r));
            g(0x00 | op_reg | REG_VALUE(r));
        g(0x00 | op_reg | REG_VALUE(r));
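    /* ModRM layout reminder: bits 7-6 (mod) select the addressing form
       (00 no displacement, 01 disp8, 10 disp32, 11 register direct),
       bits 5-3 carry op_reg, bits 2-0 the base register. mod=00 with
       rm=101 is the special RIP-relative form used for the VT_CONST
       case above, and 0x85 | op_reg is mod=10, rm=101, i.e. a
       disp32(%rbp) frame-pointer reference. */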
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
    int v, t, ft, fc, fr;

    sv = pe_getimport(sv, &v2);

    ft = sv->type.t & ~VT_DEFSIGN;

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);
        /* load from the temporary register */
    if (v == VT_LLOCAL) {
        v1.r = VT_LOCAL | VT_LVAL;
        if (!(reg_classes[fr] & RC_INT))
            fr = get_reg(RC_INT);
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
               || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
               || ((ft & VT_BTYPE) == VT_FUNC));
            gen_modrm64(b, r, fr, sv->sym, fc);
            gen_modrm(r, fr, sv->sym, fc);

            o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
            gen_addrpc32(fr, sv->sym, fc);
            if (sv->sym->type.t & VT_STATIC) {
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
                o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                gen_gotpcrel(r, sv->sym, fc);
        } else if (is64_type(ft)) {
            orex(1, r, 0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
            orex(0, r, 0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
        } else if (v == VT_LOCAL) {
            orex(1, 0, r, 0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
                /* This was a float compare. If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE. */
                o(0x037a + (REX_BASE(r) << 8));
            orex(0, r, 0, 0x0f); /* setxx %br */
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
>= TREG_XMM0
) && (r
<= TREG_XMM7
)) {
487 /* gen_cvt_ftof(VT_DOUBLE); */
488 o(0xf0245cdd); /* fstpl -0x10(%rsp) */
489 /* movsd -0x10(%rsp),%xmmN */
491 o(0x44 + REG_VALUE(r
)*8); /* %xmmN */
494 assert((v
>= TREG_XMM0
) && (v
<= TREG_XMM7
));
495 if ((ft
& VT_BTYPE
) == VT_FLOAT
) {
498 assert((ft
& VT_BTYPE
) == VT_DOUBLE
);
501 o(0xc0 + REG_VALUE(v
) + REG_VALUE(r
)*8);
503 } else if (r
== TREG_ST0
) {
504 assert((v
>= TREG_XMM0
) && (v
<= TREG_XMM7
));
505 /* gen_cvt_ftof(VT_LDOUBLE); */
506 /* movsd %xmmN,-0x10(%rsp) */
508 o(0x44 + REG_VALUE(r
)*8); /* %xmmN */
510 o(0xf02444dd); /* fldl -0x10(%rsp) */
513 o(0xc0 + REG_VALUE(r
) + REG_VALUE(v
) * 8); /* mov v, r */
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
    /* store the REX prefix in this variable when PIC is enabled */

    v = pe_getimport(v, &v2);

    fr = v->r & VT_VALMASK;
#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        gen_gotpcrel(TREG_R11, v->sym, v->c.ul);
        pic = is64_type(bt) ? 0x49 : 0x41;

    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x7e0f); /* movd */
    } else if (bt == VT_DOUBLE) {
        o(0xd60f); /* movq */
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
    if (bt == VT_BYTE || bt == VT_BOOL)
    else if (is64_type(bt))
    /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
    if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
        gen_modrm64(op64, r, v->r, v->sym, fc);
    } else if (fr != r) {
        /* XXX: can this ever be reached? */
        o(0xc0 + fr + r * 8); /* mov r, fr */
    if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
        gen_modrm(r, v->r, v->sym, fc);
    } else if (fr != r) {
        /* XXX: can this ever be reached? */
        o(0xc0 + fr + r * 8); /* mov r, fr */
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloc(cur_text_section, vtop->sym,
                   ind + 1, R_X86_64_PLT32);
            /* put an empty PC32 relocation */
            put_elf_reloc(symtab_section, cur_text_section,
                          ind + 1, R_X86_64_PC32, 0);
        oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
        /* otherwise, indirect call */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
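    /* Encoding notes: a direct call is 0xe8 (jmp: 0xe9) followed by a
       rel32 that the relocation above fixes up; an indirect call is
       0xff with ModRM /2, i.e. 0xd0 + reg, and adding is_jmp << 4
       turns that into /4 (0xe0 + reg) for an indirect jump. */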
#ifdef TCC_TARGET_PE

static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will never use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    return arg_regs[idx];
}
static int func_scratch;
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
    orex(1, 0, r & 0x100 ? 0 : r, b);
        o(0x2444 | (REG_VALUE(r) << 3));
        o(0x2484 | (REG_VALUE(r) << 3));
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align)
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    } else if (size > 4) {
    } else if (size > 2) {
    } else if (size > 1) {
static int is_sse_float(int t) {
    return bt == VT_DOUBLE || bt == VT_FLOAT;

int gfunc_arg_size(CType *type) {
    if (type->t & (VT_ARRAY|VT_BITFIELD))
    return type_size(type, &align);
void gfunc_call(int nb_args)
    int size, r, args_size, i, d, bt, struct_size;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            gen_offs_sp(0x8d, r, struct_size);
            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
        } else if (bt == VT_LDOUBLE) {
            gen_offs_sp(0xdb, 0x107, struct_size);

    if (func_scratch < struct_size)
        func_scratch = struct_size;
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        bt = (vtop->type.t & VT_BTYPE);
        size = gfunc_arg_size(&vtop->type);
            /* align to stack align size */
            size = (size + 15) & ~15;
            gen_offs_sp(0x8d, d, struct_size);
            gen_offs_sp(0x89, d, arg*8);
            d = arg_prepare_reg(arg);
            gen_offs_sp(0x8d, d, struct_size);
        if (is_sse_float(vtop->type.t)) {
            gv(RC_XMM0); /* only use one float register */
                /* movq %xmm0, j*8(%rsp) */
                gen_offs_sp(0xd60f66, 0x100, arg*8);
                /* movaps %xmm0, %xmmN */
                o(0xc0 + (arg << 3));
                d = arg_prepare_reg(arg);
                /* mov %xmm0, %rxx */
                o(0xc0 + REG_VALUE(d));
            if (bt == VT_STRUCT) {
                vtop->type.ref = NULL;
                vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                    : size > 1 ? VT_SHORT : VT_BYTE;
                gen_offs_sp(0x89, r, arg*8);
                d = arg_prepare_reg(arg);
                orex(1, d, r, 0x89); /* mov */
                o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));

    /* Copy R10 and R11 into RCX and RDX, respectively */
        o(0xd1894c); /* mov %r10, %rcx */
        o(0xda894c); /* mov %r11, %rdx */
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int addr, reg_param_index, bt, size;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_var = (sym->c == FUNC_ELLIPSIS);
    size = gfunc_arg_size(&func_vt);
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
/* generate function epilog */
void gfunc_epilog(void)
    if (func_ret_sub == 0) {
        g(func_ret_sub >> 8);

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
        greloc(cur_text_section, sym, ind-4, R_X86_64_PC32);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
#else

static void gadd_sp(int val)
    if (val == (char)val) {
    oad(0xc48148, val); /* add $xxx, %rsp */
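    /* 0xc48148 is emitted little-endian as 48 81 c4, i.e. a REX.W add
       with a 32-bit immediate; the (char)val test above presumably
       lets the elided branch use the shorter sign-extended imm8 form
       (48 83 c4 XX) for small adjustments. */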
typedef enum X86_64_Mode {

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b)
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
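    /* In words: memory taints everything, any integer member makes the
       whole aggregate integer class, x87 mixed with anything else
       falls back to memory, and only an all-SSE aggregate stays SSE.
       So a struct of two doubles merges to sse, while a struct of an
       int and a double merges to integer. */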
static X86_64_Mode classify_x86_64_inner(CType *ty)
    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_ENUM: return x86_64_mode_integer;

    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;
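        /* two consecutive fields sharing the same offset can only
           happen in a union; this simplified classifier always passes
           unions in memory */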
        if (f->next && (f->c == f->next->c))
            return x86_64_mode_memory;

        mode = x86_64_mode_none;
        for (; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count)
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        mode = x86_64_mode_integer;

    size = type_size(ty, &align);
    *psize = (size + 7) & ~7;
    *palign = (align + 7) & ~7;

        mode = x86_64_mode_memory;
        mode = classify_x86_64_inner(ty);

    case x86_64_mode_integer:
        ret_t = (size > 4) ? VT_LLONG : VT_INT;

    case x86_64_mode_x87:

    case x86_64_mode_sse:
        ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;

    default: break; /* nothing to be done for x86_64_mode_memory and x86_64_mode_none */
ST_FUNC int classify_x86_64_va_arg(CType *ty)
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
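    /* The enumerator returned here is baked into the code generated
       for va_arg, which at run time fetches the next argument from the
       integer or SSE register save area (__va_gen_reg, __va_float_reg)
       or from the stack overflow area (__va_stack). */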
/* Return the number of registers needed to return the struct, or 0 if
   returning via struct pointer. */
ST_FUNC int gfunc_sret(CType *vt, int variadic, CType *ret, int *ret_align)
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) != x86_64_mode_memory);
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    return arg_regs[idx];
}
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;
    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }
    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            case x86_64_mode_memory:
            case x86_64_mode_x87:
                stack_adjust += size;
            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
            default: break; /* nothing to be done for x86_64_mode_none */

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;
        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)

            stack_adjust = 16 - stack_adjust;
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];

            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            switch (vtop->type.t & VT_BTYPE) {
                if (mode == x86_64_mode_sse) {
                    sse_reg -= reg_count;
                } else if (mode == x86_64_mode_integer) {
                    gen_reg -= reg_count;

                    /* allocate the necessary size on stack */
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    assert(mode == x86_64_mode_sse);
                    o(0x50); /* push $rax */
                    /* movq %xmmN, (%rsp) */
                    o(0x04 + REG_VALUE(r)*8);

                    assert(mode == x86_64_mode_integer);
                    /* XXX: implicit cast ? */
                    if (gen_reg > REGN) {
                        orex(0, r, 0, 0x50 + REG_VALUE(r)); /* push r */

            /* And swap the argument back to its original position. */
            assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */

                assert(mode == x86_64_mode_memory);
                /* allocate the necessary size on stack */
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
    /* XXX This should be superfluous. */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them. */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0xc1 + ((sse_reg+1) << 3));
                assert(reg_count == 1);
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
        } else if (mode == x86_64_mode_integer) {
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
                int d = arg_prepare_reg(gen_reg);
                orex(1, d, r, 0x89); /* mov */
                o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                if (reg_count == 2) {
                    d = arg_prepare_reg(gen_reg+1);
                    orex(1, d, vtop->r2, 0x89); /* mov */
                    o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here. */

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */

    oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
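    /* The System V ABI requires %al to hold an upper bound on the
       number of SSE registers used by a variadic call; that is what
       the mov into %eax above provides before the call is emitted. */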
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;

    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;

        /* movl $0x????????, -0x10(%rbp) */
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        gen_le32(seen_stack_size);
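        /* These three stores initialize the va_list bookkeeping for
           the variadic function: the gp offset consumed by named
           integer arguments (8 bytes each), the fp offset (48 bytes
           of integer slots plus 16 per SSE register), and the frame
           offset of the first stack-passed variadic argument. */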
        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */

        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;
                sse_param_index += reg_count;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                addr = (addr + align - 1) & -align;
                reg_param_index += reg_count;

        default: break; /* nothing to be done for x86_64_mode_none */
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
/* generate function epilog */
void gfunc_epilog(void)
    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc2); /* ret n */
        g(func_ret_sub >> 8);

    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;

    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
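    /* The prolog is emitted last, once the body has been generated,
       because only now is the final frame size (-loc rounded up to 16
       bytes) known: FUNC_PROLOG_SIZE bytes were reserved at function
       entry and ind is rewound above to back-patch them. */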
#endif /* not TCC_TARGET_PE */

/* generate a jump to a label */
    return psym(0xe9, t);

/* generate a jump to a fixed address */
void gjmp_addr(int a)
        oad(0xe9, a - ind - 5);
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
    v = vtop->r & VT_VALMASK;
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100)
            /* This was a float compare. If the parity flag is set
               the result was unordered. For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test. We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump. */
            vtop->c.i &= ~0x100;
            if (!inv == (vtop->c.i != TOK_NE))
                o(0x067a); /* jp +6 */
                t = psym(0x8a, t); /* jp t */
        t = psym((vtop->c.i - 16) ^ inv, t);
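        /* The comparison tokens are arranged so that (token - 16) is
           the second byte of the two-byte 0x0f 0x8X jcc rel32
           encoding; xoring with inv flips the opcode's low bit, which
           inverts the condition (e.g. je <-> jne). */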
    } else { /* VT_JMP || VT_JMPI */
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = (int *)(cur_text_section->data + *p);
/* generate an integer binary operation */
void gen_opi(int op)
    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
    case TOK_ADDC1: /* add with carry generation */
        if (cc && (!ll || (int)vtop->c.ll == vtop->c.ll)) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            gv2(RC_INT, RC_INT);
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        if (op >= TOK_ULT && op <= TOK_GT) {
    case TOK_SUBC1: /* sub with carry generation */
    case TOK_ADDC2: /* add with carry use */
    case TOK_SUBC2: /* sub with carry use */
        gv2(RC_INT, RC_INT);
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);

        opc = 0xc0 | (opc << 3);
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
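        /* 0xf7 with ModRM /6 (0xf0 + reg) is unsigned div, /7
           (0xf8 + reg) signed idiv; the quotient lands in %rax and the
           remainder in %rdx, which is why the '%' / TOK_UMOD check
           below picks %rdx as the result register. */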
        if (op == '%' || op == TOK_UMOD)
void gen_opl(int op)
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
    int a, ft, fc, swapped, r;
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;
    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {

    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {

    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
            else if (op == TOK_EQ || op == TOK_NE)
                o(0xc9d9); /* fxch %st(1) */
            if (op == TOK_EQ || op == TOK_NE)
                o(0xe9da); /* fucompp */
            else
                o(0xd9de); /* fcompp */
            o(0xe0df); /* fnstsw %ax */
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fc80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                o(0x45c4f6); /* test $0x45, %ah */
        /* no memory reference possible for long double operations */
        load(TREG_ST0, vtop);
        o(0xde); /* fxxxp %st, %st(1) */
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                r = get_reg(RC_INT);
                v1.r = VT_LOCAL | VT_LVAL;

            if (op == TOK_EQ || op == TOK_NE) {
                if (op == TOK_LE || op == TOK_LT)
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                    op = 0x97; /* seta */
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
            if (op == TOK_EQ || op == TOK_NE)
                o(0x2e0f); /* ucomisd */
            else
                o(0x2f0f); /* comisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);

            vtop->c.i = op | 0x100;
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
        assert((ft & VT_BTYPE) != VT_LDOUBLE);

        /* if saved lvalue, then we must reload it */
        if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
            r = get_reg(RC_INT);
            v1.r = VT_LOCAL | VT_LVAL;

        assert(!(vtop[-1].r & VT_LVAL));
        assert(vtop->r & VT_LVAL);

        if ((ft & VT_BTYPE) == VT_DOUBLE) {

        if (vtop->r & VT_LVAL) {
            gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
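        /* fildll is a signed 64-bit load, but a 32-bit unsigned value
           is zero-extended in its register, so once pushed as a qword
           its sign bit is clear and the signed load still yields the
           correct unsigned magnitude. */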
        int r = get_reg(RC_FLOAT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT ? 1 : 0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
    if (bt == VT_FLOAT) {
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
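            /* REG_VALUE(r)*9 expands to REG_VALUE(r)*8 + REG_VALUE(r):
               the same register is encoded in both the reg and r/m
               fields of the ModRM byte, i.e. the conversion is done in
               place. */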
        } else if (tbt == VT_LDOUBLE) {
            /* movss %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444d9); /* flds -0x10(%rsp) */
    } else if (bt == VT_DOUBLE) {
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf02444dd); /* fldl -0x10(%rsp) */

        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x44 + REG_VALUE(r)*8);
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
    int ft, bt, size, r;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
    } else if (bt == VT_DOUBLE) {
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
/* computed goto support */

/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp, addr(%rbp) */
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    vset(type, REG_IRET, 0);
    r = gv(RC_INT); /* allocation size */
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    o(0xe0 | REG_VALUE(r));
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/