/*
 *  x86-64 code generator for TCC
 *
 *  Copyright (c) 2008 Shinichiro Hamaji
 *
 *  Based on i386-gen.c by Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef TARGET_DEFS_ONLY
/* number of available registers */
#define NB_REGS     25
#define NB_ASM_REGS 8

/* a register can belong to several classes. The classes must be
   sorted from more general to more precise (see gv2() code which
   makes assumptions about it). */
#define RC_INT  0x0001 /* generic integer register */
#define RC_FLOAT 0x0002 /* generic float register */
#define RC_RAX  0x0004
#define RC_RCX  0x0008
#define RC_RDX  0x0010
#define RC_ST0  0x0080 /* only for long double */
#define RC_R8   0x0100
#define RC_R9   0x0200
#define RC_R10  0x0400
#define RC_R11  0x0800
#define RC_XMM0 0x1000
#define RC_XMM1 0x2000
#define RC_XMM2 0x4000
#define RC_XMM3 0x8000
#define RC_XMM4 0x10000
#define RC_XMM5 0x20000
#define RC_XMM6 0x40000
#define RC_XMM7 0x80000
#define RC_IRET RC_RAX  /* function return: integer register */
#define RC_LRET RC_RDX  /* function return: second integer register */
#define RC_FRET RC_XMM0 /* function return: float register */
#define RC_QRET RC_XMM1 /* function return: second float register */
/* pretty names for the registers */
enum {
    TREG_RAX = 0,
    TREG_RCX = 1,
    TREG_RDX = 2,
    TREG_RSP = 4,
    TREG_RSI = 6,
    TREG_RDI = 7,

    TREG_R8  = 8,
    TREG_R9  = 9,
    TREG_R10 = 10,
    TREG_R11 = 11,

    TREG_XMM0 = 16,
    TREG_XMM1 = 17,
    TREG_XMM2 = 18,
    TREG_XMM3 = 19,
    TREG_XMM4 = 20,
    TREG_XMM5 = 21,
    TREG_XMM6 = 22,
    TREG_XMM7 = 23,

    TREG_ST0 = 24,

    TREG_MEM = 0x20,
};
#define REX_BASE(reg) (((reg) >> 3) & 1)
#define REG_VALUE(reg) ((reg) & 7)

/* return registers for function */
#define REG_IRET TREG_RAX /* single word int return register */
#define REG_LRET TREG_RDX /* second word return register (for long long) */
#define REG_FRET TREG_XMM0 /* float return register */
#define REG_QRET TREG_XMM1 /* second float return register */

/* defined if function parameters must be evaluated in reverse order */
#define INVERT_FUNC_PARAMS

/* pointer size, in bytes */
#define PTR_SIZE 8

/* long double size and alignment, in bytes */
#define LDOUBLE_SIZE  16
#define LDOUBLE_ALIGN 16
/* maximum alignment (for aligned attribute support) */
#define MAX_ALIGN 16

/******************************************************/
/* ELF defines */

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32  R_X86_64_32
#define R_DATA_PTR R_X86_64_64
#define R_JMP_SLOT R_X86_64_JUMP_SLOT
#define R_COPY     R_X86_64_COPY

#define ELF_START_ADDR 0x08048000
#define ELF_PAGE_SIZE  0x1000

/******************************************************/
#else /* ! TARGET_DEFS_ONLY */
/******************************************************/
#include "tcc.h"
#include <assert.h>

ST_DATA const int reg_classes[NB_REGS] = {
    /* eax */ RC_INT | RC_RAX,
    /* ecx */ RC_INT | RC_RCX,
    /* edx */ RC_INT | RC_RDX,
    0,
    0,
    0,
    0,
    0,
    RC_R8,
    RC_R9,
    RC_R10,
    RC_R11,
    0,
    0,
    0,
    0,
    /* xmm0 */ RC_FLOAT | RC_XMM0,
    /* xmm1 */ RC_FLOAT | RC_XMM1,
    /* xmm2 */ RC_FLOAT | RC_XMM2,
    /* xmm3 */ RC_FLOAT | RC_XMM3,
    /* xmm4 */ RC_FLOAT | RC_XMM4,
    /* xmm5 */ RC_FLOAT | RC_XMM5,
    /* xmm6 and xmm7 are included so gv() can be used on them,
       but they are not tagged with RC_FLOAT because they are
       callee saved on Windows */
    RC_XMM6,
    RC_XMM7,
    /* st0 */ RC_ST0
};
static unsigned long func_sub_sp_offset;
static int func_ret_sub;
/* XXX: make it faster ? */
void g(int c)
{
    int ind1;
    ind1 = ind + 1;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    cur_text_section->data[ind] = c;
    ind = ind1;
}
void o(unsigned int c)
{
    while (c) {
        g(c);
        c = c >> 8;
    }
}
void gen_le16(int v)
{
    g(v);
    g(v >> 8);
}
void gen_le32(int c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
}
void gen_le64(int64_t c)
{
    g(c);
    g(c >> 8);
    g(c >> 16);
    g(c >> 24);
    g(c >> 32);
    g(c >> 40);
    g(c >> 48);
    g(c >> 56);
}
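/* Emit an optional REX prefix followed by the opcode byte 'b'.  A REX
   byte has the form 0100WRXB: W (from 'll') selects 64-bit operand
   size, B (from 'r') extends the ModRM r/m or opcode-register field,
   and R (from 'r2') extends the ModRM reg field.  Values that are not
   plain registers (>= VT_CONST) are treated as register 0. */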
void orex(int ll, int r, int r2, int b)
{
    if ((r & VT_VALMASK) >= VT_CONST)
        r = 0;
    if ((r2 & VT_VALMASK) >= VT_CONST)
        r2 = 0;
    if (ll || REX_BASE(r) || REX_BASE(r2))
        o(0x40 | REX_BASE(r) | (REX_BASE(r2) << 2) | (ll << 3));
    o(b);
}
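/* Forward jumps that cannot be resolved yet are chained through their
   own 4-byte displacement slots: each slot stores the code offset of
   the next pending slot (0 terminates the list), and 't' is the head
   of the chain.  gsym_addr() walks the list and patches every slot
   with the final PC-relative displacement. */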
/* output a symbol and patch all calls to it */
void gsym_addr(int t, int a)
{
    int n, *ptr;
    while (t) {
        ptr = (int *)(cur_text_section->data + t);
        n = *ptr; /* next value */
        *ptr = a - t - 4;
        t = n;
    }
}

void gsym(int t)
{
    gsym_addr(t, ind);
}
/* psym is used to put an instruction with a data field which is a
   reference to a symbol. It is in fact the same as oad ! */
#define psym oad

static int is64_type(int t)
{
    return ((t & VT_BTYPE) == VT_PTR ||
            (t & VT_BTYPE) == VT_FUNC ||
            (t & VT_BTYPE) == VT_LLONG);
}
/* instruction + 4 bytes data. Return the address of the data */
ST_FUNC int oad(int c, int s)
{
    int ind1;

    o(c);
    ind1 = ind + 4;
    if (ind1 > cur_text_section->data_allocated)
        section_realloc(cur_text_section, ind1);
    *(int *)(cur_text_section->data + ind) = s;
    s = ind;
    ind = ind1;
    return s;
}
ST_FUNC void gen_addr32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_32);
    gen_le32(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addr64(int r, Sym *sym, int64_t c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_64);
    gen_le64(c);
}

/* output constant with relocation if 'r & VT_SYM' is true */
ST_FUNC void gen_addrpc32(int r, Sym *sym, int c)
{
    if (r & VT_SYM)
        greloc(cur_text_section, sym, ind, R_X86_64_PC32);
    gen_le32(c-4);
}
/* output got address with relocation */
static void gen_gotpcrel(int r, Sym *sym, int c)
{
#ifndef TCC_TARGET_PE
    Section *sr;
    ElfW(Rela) *rel;
    greloc(cur_text_section, sym, ind, R_X86_64_GOTPCREL);
    sr = cur_text_section->reloc;
    rel = (ElfW(Rela) *)(sr->data + sr->data_offset - sizeof(ElfW(Rela)));
    rel->r_addend = -4;
#else
    printf("picpic: %s %x %x | %02x %02x %02x\n", get_tok_str(sym->v, NULL), c, r,
        cur_text_section->data[ind-3],
        cur_text_section->data[ind-2],
        cur_text_section->data[ind-1]
        );
    greloc(cur_text_section, sym, ind, R_X86_64_PC32);
#endif
    gen_le32(0);
    if (c) {
        /* we use add c, %xxx for displacement */
        orex(1, r, 0, 0x81);
        o(0xc0 + REG_VALUE(r));
        gen_le32(c);
    }
}
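/* A ModRM byte is mod:reg:rm (2:3:3 bits).  The encodings used below:
   0x05 (mod=00, rm=101) selects RIP-relative addressing in 64-bit
   mode, 0x45/0x85 select %rbp plus an 8/32-bit displacement, and
   mod=11 (0xc0) names a register operand directly. */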
static void gen_modrm_impl(int op_reg, int r, Sym *sym, int c, int is_got)
{
    op_reg = REG_VALUE(op_reg) << 3;
    if ((r & VT_VALMASK) == VT_CONST) {
        /* constant memory reference */
        o(0x05 | op_reg);
        if (is_got) {
            gen_gotpcrel(r, sym, c);
        } else {
            gen_addrpc32(r, sym, c);
        }
    } else if ((r & VT_VALMASK) == VT_LOCAL) {
        /* currently, we use only ebp as base */
        if (c == (char)c) {
            /* short reference */
            o(0x45 | op_reg);
            g(c);
        } else {
            oad(0x85 | op_reg, c);
        }
    } else if ((r & VT_VALMASK) >= TREG_MEM) {
        if (c) {
            g(0x80 | op_reg | REG_VALUE(r));
            gen_le32(c);
        } else {
            g(0x00 | op_reg | REG_VALUE(r));
        }
    } else {
        g(0x00 | op_reg | REG_VALUE(r));
    }
}
/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm(int op_reg, int r, Sym *sym, int c)
{
    gen_modrm_impl(op_reg, r, sym, c, 0);
}

/* generate a modrm reference. 'op_reg' contains the additional 3
   opcode bits */
static void gen_modrm64(int opcode, int op_reg, int r, Sym *sym, int c)
{
    int is_got;
    is_got = (op_reg & TREG_MEM) && !(sym->type.t & VT_STATIC);
    orex(1, r, op_reg, opcode);
    gen_modrm_impl(op_reg, r, sym, c, is_got);
}
/* load 'r' from value 'sv' */
void load(int r, SValue *sv)
{
    int v, t, ft, fc, fr;
    SValue v1;

#ifdef TCC_TARGET_PE
    SValue v2;
    sv = pe_getimport(sv, &v2);
#endif

    fr = sv->r;
    ft = sv->type.t;
    fc = sv->c.ul;

#ifndef TCC_TARGET_PE
    /* we use indirect access via got */
    if ((fr & VT_VALMASK) == VT_CONST && (fr & VT_SYM) &&
        (fr & VT_LVAL) && !(sv->sym->type.t & VT_STATIC)) {
        /* use the result register as a temporary register */
        int tr = r | TREG_MEM;
        if (is_float(ft)) {
            /* we cannot use float registers as a temporary register */
            tr = get_reg(RC_INT) | TREG_MEM;
        }
        gen_modrm64(0x8b, tr, fr, sv->sym, 0);

        /* load from the temporary register */
        fr = tr | VT_LVAL;
    }
#endif
    v = fr & VT_VALMASK;
    if (fr & VT_LVAL) {
        int b, ll;
        if (v == VT_LLOCAL) {
            v1.type.t = VT_PTR;
            v1.r = VT_LOCAL | VT_LVAL;
            v1.c.ul = fc;
            fr = r;
            if (!(reg_classes[fr] & RC_INT))
                fr = get_reg(RC_INT);
            load(fr, &v1);
        }
        ll = 0;
        if ((ft & VT_BTYPE) == VT_FLOAT) {
            b = 0x6e0f66;
            r = REG_VALUE(r); /* movd */
        } else if ((ft & VT_BTYPE) == VT_DOUBLE) {
            b = 0x7e0ff3; /* movq */
            r = REG_VALUE(r);
        } else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
            b = 0xdb, r = 5; /* fldt */
        } else if ((ft & VT_TYPE) == VT_BYTE || (ft & VT_TYPE) == VT_BOOL) {
            b = 0xbe0f;   /* movsbl */
        } else if ((ft & VT_TYPE) == (VT_BYTE | VT_UNSIGNED)) {
            b = 0xb60f;   /* movzbl */
        } else if ((ft & VT_TYPE) == VT_SHORT) {
            b = 0xbf0f;   /* movswl */
        } else if ((ft & VT_TYPE) == (VT_SHORT | VT_UNSIGNED)) {
            b = 0xb70f;   /* movzwl */
        } else {
            assert(((ft & VT_BTYPE) == VT_INT) || ((ft & VT_BTYPE) == VT_LLONG)
                   || ((ft & VT_BTYPE) == VT_PTR) || ((ft & VT_BTYPE) == VT_ENUM)
                   || ((ft & VT_BTYPE) == VT_FUNC));
            ll = is64_type(ft);
            b = 0x8b;
        }
        if (ll) {
            gen_modrm64(b, r, fr, sv->sym, fc);
        } else {
            orex(ll, fr, r, b);
            gen_modrm(r, fr, sv->sym, fc);
        }
    } else {
        if (v == VT_CONST) {
            if (fr & VT_SYM) {
#ifdef TCC_TARGET_PE
                orex(1,0,r,0x8d);
                o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                gen_addrpc32(fr, sv->sym, fc);
#else
                if (sv->sym->type.t & VT_STATIC) {
                    orex(1,0,r,0x8d);
                    o(0x05 + REG_VALUE(r) * 8); /* lea xx(%rip), r */
                    gen_addrpc32(fr, sv->sym, fc);
                } else {
                    orex(1,0,r,0x8b);
                    o(0x05 + REG_VALUE(r) * 8); /* mov xx(%rip), r */
                    gen_gotpcrel(r, sv->sym, fc);
                }
#endif
            } else if (is64_type(ft)) {
                orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le64(sv->c.ull);
            } else {
                orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
                gen_le32(fc);
            }
        } else if (v == VT_LOCAL) {
            orex(1,0,r,0x8d); /* lea xxx(%ebp), r */
            gen_modrm(r, VT_LOCAL, sv->sym, fc);
        } else if (v == VT_CMP) {
            orex(0,r,0,0);
            if ((fc & ~0x100) != TOK_NE)
                oad(0xb8 + REG_VALUE(r), 0); /* mov $0, r */
            else
                oad(0xb8 + REG_VALUE(r), 1); /* mov $1, r */
            if (fc & 0x100) {
                /* This was a float compare.  If the parity bit is
                   set the result was unordered, meaning false for everything
                   except TOK_NE, and true for TOK_NE.  */
                fc &= ~0x100;
                o(0x037a + (REX_BASE(r) << 8));
            }
            orex(0,r,0, 0x0f); /* setxx %br */
            o(fc);
            o(0xc0 + REG_VALUE(r));
        } else if (v == VT_JMP || v == VT_JMPI) {
            t = v & 1;
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t); /* mov $1, r */
            o(0x05eb + (REX_BASE(r) << 8)); /* jmp after */
            gsym(fc);
            orex(0,r,0,0);
            oad(0xb8 + REG_VALUE(r), t ^ 1); /* mov $0, r */
        } else if (v != r) {
            if ((r >= TREG_XMM0) && (r <= TREG_XMM7)) {
                if (v == TREG_ST0) {
                    /* gen_cvt_ftof(VT_DOUBLE); */
                    o(0xf0245cdd); /* fstpl -0x10(%rsp) */
                    /* movsd -0x10(%rsp),%xmmN */
                    o(0x100ff2);
                    o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                    o(0xf024);
                } else {
                    assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                    if ((ft & VT_BTYPE) == VT_FLOAT) {
                        o(0x100ff3);
                    } else {
                        assert((ft & VT_BTYPE) == VT_DOUBLE);
                        o(0x100ff2);
                    }
                    o(0xc0 + REG_VALUE(v) + REG_VALUE(r)*8);
                }
            } else if (r == TREG_ST0) {
                assert((v >= TREG_XMM0) && (v <= TREG_XMM7));
                /* gen_cvt_ftof(VT_LDOUBLE); */
                /* movsd %xmmN,-0x10(%rsp) */
                o(0x110ff2);
                o(0x44 + REG_VALUE(r)*8); /* %xmmN */
                o(0xf024);
                o(0xf02444dd); /* fldl -0x10(%rsp) */
            } else {
                orex(1,r,v, 0x89);
                o(0xc0 + REG_VALUE(r) + REG_VALUE(v) * 8); /* mov v, r */
            }
        }
    }
}
/* store register 'r' in lvalue 'v' */
void store(int r, SValue *v)
{
    int fr, bt, ft, fc;
    int op64 = 0;
    /* store the REX prefix in this variable when PIC is enabled */
    int pic = 0;

#ifdef TCC_TARGET_PE
    SValue v2;
    v = pe_getimport(v, &v2);
#endif

    ft = v->type.t;
    fc = v->c.ul;
    fr = v->r & VT_VALMASK;
    bt = ft & VT_BTYPE;

#ifndef TCC_TARGET_PE
    /* we need to access the variable via got */
    if (fr == VT_CONST && (v->r & VT_SYM)) {
        /* mov xx(%rip), %r11 */
        o(0x1d8b4c);
        gen_gotpcrel(TREG_R11, v->sym, v->c.ul);
        pic = is64_type(bt) ? 0x49 : 0x41;
    }
#endif
    /* XXX: incorrect if float reg to reg */
    if (bt == VT_FLOAT) {
        o(0x66);
        o(pic);
        o(0x7e0f); /* movd */
        r = REG_VALUE(r);
    } else if (bt == VT_DOUBLE) {
        o(0x66);
        o(pic);
        o(0xd60f); /* movq */
        r = REG_VALUE(r);
    } else if (bt == VT_LDOUBLE) {
        o(0xc0d9); /* fld %st(0) */
        o(pic);
        o(0xdb); /* fstpt */
        r = 7;
    } else {
        if (bt == VT_SHORT)
            o(0x66);
        o(pic);
        if (bt == VT_BYTE || bt == VT_BOOL)
            orex(0, 0, r, 0x88);
        else if (is64_type(bt))
            op64 = 0x89;
        else
            orex(0, 0, r, 0x89);
    }
    if (pic) {
        /* xxx r, (%r11) where xxx is mov, movq, fld, or etc */
        if (op64)
            o(op64);
        o(3 + (r << 3));
    } else if (op64) {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm64(op64, r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: should we ever get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    } else {
        if (fr == VT_CONST || fr == VT_LOCAL || (v->r & VT_LVAL)) {
            gen_modrm(r, v->r, v->sym, fc);
        } else if (fr != r) {
            /* XXX: should we ever get here? */
            abort();
            o(0xc0 + fr + r * 8); /* mov r, fr */
        }
    }
}
/* 'is_jmp' is '1' if it is a jump */
static void gcall_or_jmp(int is_jmp)
{
    int r;
    if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        /* constant case */
        if (vtop->r & VT_SYM) {
            /* relocation case */
            greloc(cur_text_section, vtop->sym,
                   ind + 1, R_X86_64_PC32);
        } else {
            /* put an empty PC32 relocation */
            put_elf_reloc(symtab_section, cur_text_section,
                          ind + 1, R_X86_64_PC32, 0);
        }
        oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
    } else {
        /* otherwise, indirect call */
        r = TREG_R11;
        load(r, vtop);
        o(0x41); /* REX */
        o(0xff); /* call/jmp *r */
        o(0xd0 + REG_VALUE(r) + (is_jmp << 4));
    }
}
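/* The call target is loaded into R11 for the indirect case because
   R11 is call-clobbered in both the System V and Windows ABIs and is
   never used to pass arguments, so it cannot collide with the
   argument registers set up just before the call. */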
#ifdef TCC_TARGET_PE

#define REGN 4
static const uint8_t arg_regs[REGN] = {
    TREG_RCX, TREG_RDX, TREG_R8, TREG_R9
};

/* Prepare arguments in R10 and R11 rather than RCX and RDX
   because gv() will not ever use these */
static int arg_prepare_reg(int idx) {
    if (idx == 0 || idx == 1)
        /* idx=0: r10, idx=1: r11 */
        return idx + 10;
    else
        return arg_regs[idx];
}
static int func_scratch;

/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */

void gen_offs_sp(int b, int r, int d)
{
    orex(1,0,r & 0x100 ? 0 : r, b);
    if (d == (char)d) {
        o(0x2444 | (REG_VALUE(r) << 3));
        g(d);
    } else {
        o(0x2484 | (REG_VALUE(r) << 3));
        gen_le32(d);
    }
}
/* Return 1 if this function returns via an sret pointer, 0 otherwise */
ST_FUNC int gfunc_sret(CType *vt, CType *ret, int *ret_align)
{
    int size, align;
    *ret_align = 1; // Never have to re-align return values for x86-64
    size = type_size(vt, &align);
    ret->ref = NULL;
    if (size > 8) {
        return 1;
    } else if (size > 4) {
        ret->t = VT_LLONG;
        return 0;
    } else if (size > 2) {
        ret->t = VT_INT;
        return 0;
    } else if (size > 1) {
        ret->t = VT_SHORT;
        return 0;
    } else {
        ret->t = VT_BYTE;
        return 0;
    }
}
static int is_sse_float(int t) {
    int bt;
    bt = t & VT_BTYPE;
    return bt == VT_DOUBLE || bt == VT_FLOAT;
}

int gfunc_arg_size(CType *type) {
    int align;
    if (type->t & (VT_ARRAY|VT_BITFIELD))
        return 8;
    return type_size(type, &align);
}
void gfunc_call(int nb_args)
{
    int size, r, args_size, i, d, bt, struct_size;
    int arg;

    args_size = (nb_args < REGN ? REGN : nb_args) * PTR_SIZE;
    arg = nb_args;

    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    struct_size = args_size;
    for(i = 0; i < nb_args; i++) {
        SValue *sv;

        --arg;
        sv = &vtop[-i];
        bt = (sv->type.t & VT_BTYPE);
        size = gfunc_arg_size(&sv->type);

        if (size <= 8)
            continue; /* arguments smaller than 8 bytes passed in registers or on stack */

        if (bt == VT_STRUCT) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            /* generate structure store */
            r = get_reg(RC_INT);
            gen_offs_sp(0x8d, r, struct_size);
            struct_size += size;

            /* generate memcpy call */
            vset(&sv->type, r | VT_LVAL, 0);
            vpushv(sv);
            vstore();
            --vtop;
        } else if (bt == VT_LDOUBLE) {
            gv(RC_ST0);
            gen_offs_sp(0xdb, 0x107, struct_size);
            struct_size += 16;
        }
    }

    if (func_scratch < struct_size)
        func_scratch = struct_size;

    arg = nb_args;
    struct_size = args_size;

    for(i = 0; i < nb_args; i++) {
        --arg;
        bt = (vtop->type.t & VT_BTYPE);

        size = gfunc_arg_size(&vtop->type);
        if (size > 8) {
            /* align to stack align size */
            size = (size + 15) & ~15;
            if (arg >= REGN) {
                d = get_reg(RC_INT);
                gen_offs_sp(0x8d, d, struct_size);
                gen_offs_sp(0x89, d, arg*8);
            } else {
                d = arg_prepare_reg(arg);
                gen_offs_sp(0x8d, d, struct_size);
            }
            struct_size += size;
        } else {
            if (is_sse_float(vtop->type.t)) {
                gv(RC_XMM0); /* only use one float register */
                if (arg >= REGN) {
                    /* movq %xmm0, j*8(%rsp) */
                    gen_offs_sp(0xd60f66, 0x100, arg*8);
                } else {
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (arg << 3));
                    d = arg_prepare_reg(arg);
                    /* mov %xmm0, %rxx */
                    o(0x66);
                    orex(1,d,0, 0x7e0f);
                    o(0xc0 + REG_VALUE(d));
                }
            } else {
                if (bt == VT_STRUCT) {
                    vtop->type.ref = NULL;
                    vtop->type.t = size > 4 ? VT_LLONG : size > 2 ? VT_INT
                        : size > 1 ? VT_SHORT : VT_BYTE;
                }

                r = gv(RC_INT);
                if (arg >= REGN) {
                    gen_offs_sp(0x89, r, arg*8);
                } else {
                    d = arg_prepare_reg(arg);
                    orex(1,d,r,0x89); /* mov */
                    o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
                }
            }
        }
        vtop--;
    }
    save_regs(0);

    /* Copy R10 and R11 into RCX and RDX, respectively */
    if (nb_args > 0) {
        o(0xd1894c); /* mov %r10, %rcx */
        if (nb_args > 1) {
            o(0xda894c); /* mov %r11, %rdx */
        }
    }

    gcall_or_jmp(0);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    int addr, reg_param_index, bt, size;
    Sym *sym;
    CType *type;

    func_ret_sub = 0;
    func_scratch = 0;
    loc = 0;

    addr = PTR_SIZE * 2;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    reg_param_index = 0;

    sym = func_type->ref;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    size = gfunc_arg_size(&func_vt);
    if (size > 8) {
        gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
        func_vc = addr;
        reg_param_index++;
        addr += 8;
    }

    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        bt = type->t & VT_BTYPE;
        size = gfunc_arg_size(type);
        if (size > 8) {
            if (reg_param_index < REGN) {
                gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL | VT_REF, addr);
        } else {
            if (reg_param_index < REGN) {
                /* save arguments passed by register */
                if ((bt == VT_FLOAT) || (bt == VT_DOUBLE)) {
                    o(0xd60f66); /* movq */
                    gen_modrm(reg_param_index, VT_LOCAL, NULL, addr);
                } else {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
                }
            }
            sym_push(sym->v & ~SYM_FIELD, type, VT_LOCAL | VT_LVAL, addr);
        }
        addr += 8;
        reg_param_index++;
    }

    while (reg_param_index < REGN) {
        if (func_type->ref->c == FUNC_ELLIPSIS) {
            gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, addr);
            addr += 8;
        }
        reg_param_index++;
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }

    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    /* align local size to word & save local variables */
    v = (func_scratch + -loc + 15) & -16;

    if (v >= 4096) {
        Sym *sym = external_global_sym(TOK___chkstk, &func_old_type, 0);
        oad(0xb8, v); /* mov stacksize, %eax */
        oad(0xe8, -4); /* call __chkstk, (does the stackframe too) */
        greloc(cur_text_section, sym, ind-4, R_X86_64_PC32);
        o(0x90); /* fill for FUNC_PROLOG_SIZE = 11 bytes */
    } else {
        o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
        o(0xec8148); /* sub rsp, stacksize */
        gen_le32(v);
    }

    cur_text_section->data_offset = saved_ind;
    pe_add_unwind_data(ind, saved_ind, v);
    ind = cur_text_section->data_offset;
}
#else

static void gadd_sp(int val)
{
    if (val == (char)val) {
        o(0xc48348);
        g(val);
    } else {
        oad(0xc48148, val); /* add $xxx, %rsp */
    }
}
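/* The following machinery implements argument classification from the
   System V AMD64 ABI: each argument (or each of its eightbytes) ends
   up in one of the classes below, which determines whether it travels
   in general registers, SSE registers, the x87 stack, or memory. */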
typedef enum X86_64_Mode {
    x86_64_mode_none,
    x86_64_mode_memory,
    x86_64_mode_integer,
    x86_64_mode_sse,
    x86_64_mode_x87
} X86_64_Mode;

static X86_64_Mode classify_x86_64_merge(X86_64_Mode a, X86_64_Mode b) {
    if (a == b)
        return a;
    else if (a == x86_64_mode_none)
        return b;
    else if (b == x86_64_mode_none)
        return a;
    else if ((a == x86_64_mode_memory) || (b == x86_64_mode_memory))
        return x86_64_mode_memory;
    else if ((a == x86_64_mode_integer) || (b == x86_64_mode_integer))
        return x86_64_mode_integer;
    else if ((a == x86_64_mode_x87) || (b == x86_64_mode_x87))
        return x86_64_mode_memory;
    else
        return x86_64_mode_sse;
}
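/* Note on the struct case below: two consecutive fields sharing the
   same offset (f->c) are taken as evidence of a union, which this
   implementation conservatively passes in memory instead of applying
   the full per-eightbyte merge rules. */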
static X86_64_Mode classify_x86_64_inner(CType *ty) {
    X86_64_Mode mode;
    Sym *f;

    switch (ty->t & VT_BTYPE) {
    case VT_VOID: return x86_64_mode_none;

    case VT_INT:
    case VT_BYTE:
    case VT_SHORT:
    case VT_LLONG:
    case VT_BOOL:
    case VT_PTR:
    case VT_FUNC:
    case VT_ENUM: return x86_64_mode_integer;

    case VT_FLOAT:
    case VT_DOUBLE: return x86_64_mode_sse;

    case VT_LDOUBLE: return x86_64_mode_x87;

    case VT_STRUCT:
        f = ty->ref;

        // Detect union
        if (f->next && (f->c == f->next->c))
            return x86_64_mode_memory;

        mode = x86_64_mode_none;
        for (; f; f = f->next)
            mode = classify_x86_64_merge(mode, classify_x86_64_inner(&f->type));

        return mode;
    }

    assert(0);
}
static X86_64_Mode classify_x86_64_arg(CType *ty, CType *ret, int *psize, int *palign, int *reg_count) {
    X86_64_Mode mode;
    int size, align, ret_t = 0;

    if (ty->t & (VT_BITFIELD|VT_ARRAY)) {
        *psize = 8;
        *reg_count = 1;
        ret_t = ty->t;
        mode = x86_64_mode_integer;
    } else {
        size = type_size(ty, &align);
        *psize = (size + 7) & ~7;
        *palign = (align + 7) & ~7;

        if (size > 16) {
            mode = x86_64_mode_memory;
        } else {
            mode = classify_x86_64_inner(ty);
            switch (mode) {
            case x86_64_mode_integer:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QLONG;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_LLONG : VT_INT;
                }
                break;

            case x86_64_mode_x87:
                *reg_count = 1;
                ret_t = VT_LDOUBLE;
                break;

            case x86_64_mode_sse:
                if (size > 8) {
                    *reg_count = 2;
                    ret_t = VT_QFLOAT;
                } else {
                    *reg_count = 1;
                    ret_t = (size > 4) ? VT_DOUBLE : VT_FLOAT;
                }
                break;
            case x86_64_mode_memory: /* avoid warning */
            case x86_64_mode_none:
                tcc_error("argument type not handled in classify_x86_64_arg");
            }
        }
    }

    if (ret) {
        ret->ref = NULL;
        ret->t = ret_t;
    }

    return mode;
}
ST_FUNC int classify_x86_64_va_arg(CType *ty) {
    /* This definition must be synced with stdarg.h */
    enum __va_arg_type {
        __va_gen_reg, __va_float_reg, __va_stack
    };
    int size, align, reg_count;
    X86_64_Mode mode = classify_x86_64_arg(ty, NULL, &size, &align, &reg_count);
    switch (mode) {
    default: return __va_stack;
    case x86_64_mode_integer: return __va_gen_reg;
    case x86_64_mode_sse: return __va_float_reg;
    }
}
/* Return 1 if this function returns via an sret pointer, 0 otherwise */
int gfunc_sret(CType *vt, CType *ret, int *ret_align) {
    int size, align, reg_count;
    *ret_align = 1; // Never have to re-align return values for x86-64
    return (classify_x86_64_arg(vt, ret, &size, &align, &reg_count) == x86_64_mode_memory);
}

#define REGN 6
static const uint8_t arg_regs[REGN] = {
    TREG_RDI, TREG_RSI, TREG_RDX, TREG_RCX, TREG_R8, TREG_R9
};

static int arg_prepare_reg(int idx) {
    if (idx == 2 || idx == 3)
        /* idx=2: r10, idx=3: r11 */
        return idx + 8;
    else
        return arg_regs[idx];
}
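/* gfunc_call() below proceeds in phases: stack-passed arguments are
   stored first, grouped into runs so that 16-byte aligned arguments
   land on 16-byte boundaries; register arguments are then loaded,
   staging the RDX/RCX values in R10/R11 so gv() cannot clobber them;
   and %eax finally receives the number of SSE registers used, which
   the ABI requires for calls to variadic functions. */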
/* Generate function call. The function address is pushed first, then
   all the parameters in call order. This function pops all the
   parameters and the function address. */
void gfunc_call(int nb_args)
{
    X86_64_Mode mode;
    CType type;
    int size, align, r, args_size, stack_adjust, run_start, run_end, i, reg_count;
    int nb_reg_args = 0;
    int nb_sse_args = 0;
    int sse_reg, gen_reg;

    /* calculate the number of integer/float register arguments */
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
        if (mode == x86_64_mode_sse)
            nb_sse_args += reg_count;
        else if (mode == x86_64_mode_integer)
            nb_reg_args += reg_count;
    }

    /* arguments are collected in runs. Each run is a collection of 8-byte aligned arguments
       and ended by a 16-byte aligned argument. This is because, from the point of view of
       the callee, argument alignment is computed from the bottom up. */
    /* for struct arguments, we need to call memcpy and the function
       call breaks register passing arguments we are preparing.
       So, we process arguments which will be passed by stack first. */
    gen_reg = nb_reg_args;
    sse_reg = nb_sse_args;
    run_start = 0;
    args_size = 0;
    while (run_start != nb_args) {
        int run_gen_reg = gen_reg, run_sse_reg = sse_reg;

        run_end = nb_args;
        stack_adjust = 0;
        for(i = run_start; (i < nb_args) && (run_end == nb_args); i++) {
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            switch (mode) {
            case x86_64_mode_memory:
            case x86_64_mode_x87:
            stack_arg:
                if (align == 16)
                    run_end = i;
                else
                    stack_adjust += size;
                break;

            case x86_64_mode_sse:
                sse_reg -= reg_count;
                if (sse_reg + reg_count > 8) goto stack_arg;
                break;

            case x86_64_mode_integer:
                gen_reg -= reg_count;
                if (gen_reg + reg_count > REGN) goto stack_arg;
                break;
            case x86_64_mode_none: /* avoid warning */
                tcc_error("argument type not handled in gfunc_call");
            }
        }

        gen_reg = run_gen_reg;
        sse_reg = run_sse_reg;

        /* adjust stack to align SSE boundary */
        if (stack_adjust &= 15) {
            /* fetch cpu flag before the following sub will change the value */
            if (vtop >= vstack && (vtop->r & VT_VALMASK) == VT_CMP)
                gv(RC_INT);

            stack_adjust = 16 - stack_adjust;
            o(0x48);
            oad(0xec81, stack_adjust); /* sub $xxx, %rsp */
            args_size += stack_adjust;
        }
        for(i = run_start; i < run_end;) {
            /* Swap argument to top, it will possibly be changed here,
               and might use more temps. At the end of the loop we keep
               it on the stack and swap it back to its original position
               if it is a register. */
            SValue tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            mode = classify_x86_64_arg(&vtop->type, NULL, &size, &align, &reg_count);

            int arg_stored = 1;
            switch (vtop->type.t & VT_BTYPE) {
            case VT_STRUCT:
                if (mode == x86_64_mode_sse) {
                    if (sse_reg > 8)
                        sse_reg -= reg_count;
                    else
                        arg_stored = 0;
                } else if (mode == x86_64_mode_integer) {
                    if (gen_reg > REGN)
                        gen_reg -= reg_count;
                    else
                        arg_stored = 0;
                }

                if (arg_stored) {
                    /* allocate the necessary size on stack */
                    o(0x48);
                    oad(0xec81, size); /* sub $xxx, %rsp */
                    /* generate structure store */
                    r = get_reg(RC_INT);
                    orex(1, r, 0, 0x89); /* mov %rsp, r */
                    o(0xe0 + REG_VALUE(r));
                    vset(&vtop->type, r | VT_LVAL, 0);
                    vswap();
                    vstore();
                    args_size += size;
                }
                break;

            case VT_LDOUBLE:
                assert(0);
                break;

            case VT_FLOAT:
            case VT_DOUBLE:
                assert(mode == x86_64_mode_sse);
                if (sse_reg > 8) {
                    --sse_reg;
                    r = gv(RC_FLOAT);
                    o(0x50); /* push $rax */
                    /* movq %xmmN, (%rsp) */
                    o(0xd60f66);
                    o(0x04 + REG_VALUE(r)*8);
                    o(0x24);
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;

            default:
                assert(mode == x86_64_mode_integer);
                /* simple type */
                /* XXX: implicit cast ? */
                if (gen_reg > REGN) {
                    --gen_reg;
                    r = gv(RC_INT);
                    orex(0,r,0,0x50 + REG_VALUE(r)); /* push r */
                    args_size += size;
                } else {
                    arg_stored = 0;
                }
                break;
            }

            /* And swap the argument back to its original position.  */
            tmp = vtop[0];
            vtop[0] = vtop[-i];
            vtop[-i] = tmp;

            if (arg_stored) {
                vrotb(i+1);
                assert((vtop->type.t == tmp.type.t) && (vtop->r == tmp.r));
                vpop();
                --nb_args;
                --run_end;
            } else {
                ++i;
            }
        }
        /* handle 16 byte aligned arguments at end of run */
        run_start = i = run_end;
        while (i < nb_args) {
            /* Rotate argument to top since it will always be popped */
            mode = classify_x86_64_arg(&vtop[-i].type, NULL, &size, &align, &reg_count);
            if (align != 16)
                break;

            vrotb(i+1);

            if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
                gv(RC_ST0);
                oad(0xec8148, size); /* sub $xxx, %rsp */
                o(0x7cdb); /* fstpt 0(%rsp) */
                g(0x24);
                g(0x00);
                args_size += size;
            } else {
                assert(mode == x86_64_mode_memory);

                /* allocate the necessary size on stack */
                o(0x48);
                oad(0xec81, size); /* sub $xxx, %rsp */
                /* generate structure store */
                r = get_reg(RC_INT);
                orex(1, r, 0, 0x89); /* mov %rsp, r */
                o(0xe0 + REG_VALUE(r));
                vset(&vtop->type, r | VT_LVAL, 0);
                vswap();
                vstore();
                args_size += size;
            }

            vpop();
            --nb_args;
        }
    }
    /* XXX This should be superfluous.  */
    save_regs(0); /* save used temporary registers */

    /* then, we prepare register passing arguments.
       Note that we cannot set RDX and RCX in this loop because gv()
       may break these temporary registers. Let's use R10 and R11
       instead of them */
    assert(gen_reg <= REGN);
    assert(sse_reg <= 8);
    for(i = 0; i < nb_args; i++) {
        mode = classify_x86_64_arg(&vtop->type, &type, &size, &align, &reg_count);
        /* Alter stack entry type so that gv() knows how to treat it */
        vtop->type = type;
        if (mode == x86_64_mode_sse) {
            if (reg_count == 2) {
                sse_reg -= 2;
                gv(RC_FRET); /* Use pair load into xmm0 & xmm1 */
                if (sse_reg) { /* avoid redundant movaps %xmm0, %xmm0 */
                    /* movaps %xmm0, %xmmN */
                    o(0x280f);
                    o(0xc0 + (sse_reg << 3));
                    /* movaps %xmm1, %xmmN */
                    o(0x280f);
                    o(0xc1 + ((sse_reg+1) << 3));
                }
            } else {
                assert(reg_count == 1);
                --sse_reg;
                /* Load directly to register */
                gv(RC_XMM0 << sse_reg);
            }
        } else if (mode == x86_64_mode_integer) {
            /* simple type */
            /* XXX: implicit cast ? */
            gen_reg -= reg_count;
            r = gv(RC_INT);
            int d = arg_prepare_reg(gen_reg);
            orex(1,d,r,0x89); /* mov */
            o(0xc0 + REG_VALUE(r) * 8 + REG_VALUE(d));
            if (reg_count == 2) {
                d = arg_prepare_reg(gen_reg+1);
                orex(1,d,vtop->r2,0x89); /* mov */
                o(0xc0 + REG_VALUE(vtop->r2) * 8 + REG_VALUE(d));
            }
        }
        vtop--;
    }
    assert(gen_reg == 0);
    assert(sse_reg == 0);

    /* We shouldn't have many operands on the stack anymore, but the
       call address itself is still there, and it might be in %eax
       (or edx/ecx) currently, which the below writes would clobber.
       So evict all remaining operands here.  */
    save_regs(0);

    /* Copy R10 and R11 into RDX and RCX, respectively */
    if (nb_reg_args > 2) {
        o(0xd2894c); /* mov %r10, %rdx */
        if (nb_reg_args > 3) {
            o(0xd9894c); /* mov %r11, %rcx */
        }
    }

    oad(0xb8, nb_sse_args < 8 ? nb_sse_args : 8); /* mov nb_sse_args, %eax */
    gcall_or_jmp(0);
    if (args_size)
        gadd_sp(args_size);
    vtop--;
}
#define FUNC_PROLOG_SIZE 11

static void push_arg_reg(int i) {
    loc -= 8;
    gen_modrm64(0x89, arg_regs[i], VT_LOCAL, NULL, loc);
}
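/* For variadic functions, gfunc_prolog() below spills every argument
   register into a register save area and precomputes three values
   consumed by tcc's stdarg implementation: the offset into the
   general-register area, the offset into the SSE-register area, and
   the size of the stack-passed argument area. */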
/* generate function prolog of type 't' */
void gfunc_prolog(CType *func_type)
{
    X86_64_Mode mode;
    int i, addr, align, size, reg_count;
    int param_addr = 0, reg_param_index, sse_param_index;
    Sym *sym;
    CType *type;

    sym = func_type->ref;
    addr = PTR_SIZE * 2;
    loc = 0;
    ind += FUNC_PROLOG_SIZE;
    func_sub_sp_offset = ind;
    func_ret_sub = 0;

    if (func_type->ref->c == FUNC_ELLIPSIS) {
        int seen_reg_num, seen_sse_num, seen_stack_size;
        seen_reg_num = seen_sse_num = 0;
        /* frame pointer and return address */
        seen_stack_size = PTR_SIZE * 2;
        /* count the number of seen parameters */
        sym = func_type->ref;
        while ((sym = sym->next) != NULL) {
            type = &sym->type;
            mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
            switch (mode) {
            default:
            stack_arg:
                seen_stack_size = ((seen_stack_size + align - 1) & -align) + size;
                break;

            case x86_64_mode_integer:
                if (seen_reg_num + reg_count <= 8) {
                    seen_reg_num += reg_count;
                } else {
                    seen_reg_num = 8;
                    goto stack_arg;
                }
                break;

            case x86_64_mode_sse:
                if (seen_sse_num + reg_count <= 8) {
                    seen_sse_num += reg_count;
                } else {
                    seen_sse_num = 8;
                    goto stack_arg;
                }
                break;
            }
        }

        loc -= 16;
        /* movl $0x????????, -0x10(%rbp) */
        o(0xf045c7);
        gen_le32(seen_reg_num * 8);
        /* movl $0x????????, -0xc(%rbp) */
        o(0xf445c7);
        gen_le32(seen_sse_num * 16 + 48);
        /* movl $0x????????, -0x8(%rbp) */
        o(0xf845c7);
        gen_le32(seen_stack_size);

        /* save all register passing arguments */
        for (i = 0; i < 8; i++) {
            loc -= 16;
            o(0xd60f66); /* movq */
            gen_modrm(7 - i, VT_LOCAL, NULL, loc);
            /* movq $0, loc+8(%rbp) */
            o(0x85c748);
            gen_le32(loc + 8);
            gen_le32(0);
        }

        for (i = 0; i < REGN; i++) {
            push_arg_reg(REGN-1-i);
        }
    }
    sym = func_type->ref;
    reg_param_index = 0;
    sse_param_index = 0;

    /* if the function returns a structure, then add an
       implicit pointer parameter */
    func_vt = sym->type;
    mode = classify_x86_64_arg(&func_vt, NULL, &size, &align, &reg_count);
    if (mode == x86_64_mode_memory) {
        push_arg_reg(reg_param_index);
        func_vc = loc;
        reg_param_index++;
    }
    /* define parameters */
    while ((sym = sym->next) != NULL) {
        type = &sym->type;
        mode = classify_x86_64_arg(type, NULL, &size, &align, &reg_count);
        switch (mode) {
        case x86_64_mode_sse:
            if (sse_param_index + reg_count <= 8) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    o(0xd60f66); /* movq */
                    gen_modrm(sse_param_index, VT_LOCAL, NULL, param_addr + i*8);
                    ++sse_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                sse_param_index += reg_count;
            }
            break;

        case x86_64_mode_memory:
        case x86_64_mode_x87:
            addr = (addr + align - 1) & -align;
            param_addr = addr;
            addr += size;
            break;

        case x86_64_mode_integer: {
            if (reg_param_index + reg_count <= REGN) {
                /* save arguments passed by register */
                loc -= reg_count * 8;
                param_addr = loc;
                for (i = 0; i < reg_count; ++i) {
                    gen_modrm64(0x89, arg_regs[reg_param_index], VT_LOCAL, NULL, param_addr + i*8);
                    ++reg_param_index;
                }
            } else {
                addr = (addr + align - 1) & -align;
                param_addr = addr;
                addr += size;
                reg_param_index += reg_count;
            }
            break;
        }
        case x86_64_mode_none:
            tcc_error("argument type not handled in gfunc_prolog");
        }
        sym_push(sym->v & ~SYM_FIELD, type,
                 VT_LOCAL | VT_LVAL, param_addr);
    }
}
/* generate function epilog */
void gfunc_epilog(void)
{
    int v, saved_ind;

    o(0xc9); /* leave */
    if (func_ret_sub == 0) {
        o(0xc3); /* ret */
    } else {
        o(0xc2); /* ret n */
        g(func_ret_sub);
        g(func_ret_sub >> 8);
    }
    /* align local size to word & save local variables */
    v = (-loc + 15) & -16;
    saved_ind = ind;
    ind = func_sub_sp_offset - FUNC_PROLOG_SIZE;
    o(0xe5894855); /* push %rbp, mov %rsp, %rbp */
    o(0xec8148); /* sub rsp, stacksize */
    gen_le32(v);
    ind = saved_ind;
}
#endif /* not PE */

/* generate a jump to a label */
int gjmp(int t)
{
    return psym(0xe9, t);
}

/* generate a jump to a fixed address */
void gjmp_addr(int a)
{
    int r;
    r = a - ind - 2;
    if (r == (char)r) {
        g(0xeb);
        g(r);
    } else {
        oad(0xe9, a - ind - 5);
    }
}
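/* In gtst(), a comparison result is encoded in vtop->c.i as one of
   the TOK_ULT..TOK_GT tokens, whose values match the 0x9x setcc
   opcodes; subtracting 16 turns them into the corresponding 0x8x jcc
   opcode byte, and xoring with 'inv' inverts the condition.  Bit
   0x100 flags a float compare whose parity result must also be
   checked. */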
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
    int v, *p;

    v = vtop->r & VT_VALMASK;
    if (v == VT_CMP) {
        /* fast case : can jump directly since flags are set */
        if (vtop->c.i & 0x100) {
            /* This was a float compare.  If the parity flag is set
               the result was unordered.  For anything except != this
               means false and we don't jump (anding both conditions).
               For != this means true (oring both).
               Take care about inverting the test.  We need to jump
               to our target if the result was unordered and test wasn't NE,
               otherwise if unordered we don't want to jump.  */
            vtop->c.i &= ~0x100;
            if (!inv == (vtop->c.i != TOK_NE))
                o(0x067a);  /* jp +6 */
            else {
                g(0x0f);
                t = psym(0x8a, t); /* jp t */
            }
        }
        g(0x0f);
        t = psym((vtop->c.i - 16) ^ inv, t);
    } else if (v == VT_JMP || v == VT_JMPI) {
        /* && or || optimization */
        if ((v & 1) == inv) {
            /* insert vtop->c jump list in t */
            p = &vtop->c.i;
            while (*p != 0)
                p = (int *)(cur_text_section->data + *p);
            *p = t;
            t = vtop->c.i;
        } else {
            t = gjmp(t);
            gsym(vtop->c.i);
        }
    } else {
        if (is_float(vtop->type.t) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            vpushi(0);
            gen_op(TOK_NE);
        }
        if ((vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST) {
            /* constant jmp optimization */
            if ((vtop->c.i != 0) != inv)
                t = gjmp(t);
        } else {
            v = gv(RC_INT);
            orex(0,v,v,0x85);
            o(0xc0 + REG_VALUE(v) * 9);
            g(0x0f);
            t = psym(0x85 ^ inv, t);
        }
    }
    vtop--;
    return t;
}
/* generate an integer binary operation */
void gen_opi(int op)
{
    int r, fr, opc, c;
    int ll, uu, cc;

    ll = is64_type(vtop[-1].type.t);
    uu = (vtop[-1].type.t & VT_UNSIGNED) != 0;
    cc = (vtop->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;

    switch(op) {
    case '+':
    case TOK_ADDC1: /* add with carry generation */
        opc = 0;
    gen_op8:
        if (cc && (!ll || (int)vtop->c.ll == vtop->c.ll)) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            c = vtop->c.i;
            if (c == (char)c) {
                /* XXX: generate inc and dec for smaller code ? */
                orex(ll, r, 0, 0x83);
                o(0xc0 | (opc << 3) | REG_VALUE(r));
                g(c);
            } else {
                orex(ll, r, 0, 0x81);
                oad(0xc0 | (opc << 3) | REG_VALUE(r), c);
            }
        } else {
            gv2(RC_INT, RC_INT);
            r = vtop[-1].r;
            fr = vtop[0].r;
            orex(ll, r, fr, (opc << 3) | 0x01);
            o(0xc0 + REG_VALUE(r) + REG_VALUE(fr) * 8);
        }
        vtop--;
        if (op >= TOK_ULT && op <= TOK_GT) {
            vtop->r = VT_CMP;
            vtop->c.i = op;
        }
        break;
    case '-':
    case TOK_SUBC1: /* sub with carry generation */
        opc = 5;
        goto gen_op8;
    case TOK_ADDC2: /* add with carry use */
        opc = 2;
        goto gen_op8;
    case TOK_SUBC2: /* sub with carry use */
        opc = 3;
        goto gen_op8;
    case '&':
        opc = 4;
        goto gen_op8;
    case '^':
        opc = 6;
        goto gen_op8;
    case '|':
        opc = 1;
        goto gen_op8;
    case '*':
        gv2(RC_INT, RC_INT);
        r = vtop[-1].r;
        fr = vtop[0].r;
        orex(ll, fr, r, 0xaf0f); /* imul fr, r */
        o(0xc0 + REG_VALUE(fr) + REG_VALUE(r) * 8);
        vtop--;
        break;
    case TOK_SHL:
        opc = 4;
        goto gen_shift;
    case TOK_SHR:
        opc = 5;
        goto gen_shift;
    case TOK_SAR:
        opc = 7;
    gen_shift:
        opc = 0xc0 | (opc << 3);
        if (cc) {
            /* constant case */
            vswap();
            r = gv(RC_INT);
            vswap();
            orex(ll, r, 0, 0xc1); /* shl/shr/sar $xxx, r */
            o(opc | REG_VALUE(r));
            g(vtop->c.i & (ll ? 63 : 31));
        } else {
            /* we generate the shift in ecx */
            gv2(RC_INT, RC_RCX);
            r = vtop[-1].r;
            orex(ll, r, 0, 0xd3); /* shl/shr/sar %cl, r */
            o(opc | REG_VALUE(r));
        }
        vtop--;
        break;
    case TOK_UDIV:
    case TOK_UMOD:
        uu = 1;
        goto divmod;
    case '/':
    case '%':
    case TOK_PDIV:
        uu = 0;
    divmod:
        /* first operand must be in eax */
        /* XXX: need better constraint for second operand */
        gv2(RC_RAX, RC_RCX);
        r = vtop[-1].r;
        fr = vtop[0].r;
        vtop--;
        save_reg(TREG_RDX);
        orex(ll, 0, 0, uu ? 0xd231 : 0x99); /* xor %edx,%edx : cqto */
        orex(ll, fr, 0, 0xf7); /* div fr, %eax */
        o((uu ? 0xf0 : 0xf8) + REG_VALUE(fr));
        if (op == '%' || op == TOK_UMOD)
            r = TREG_RDX;
        else
            r = TREG_RAX;
        vtop->r = r;
        break;
    default:
        opc = 7;
        goto gen_op8;
    }
}
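/* 64-bit operations can reuse gen_opi() directly: the operand size is
   derived from the value types via is64_type(), which makes orex()
   emit the REX.W prefix where needed. */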
void gen_opl(int op)
{
    gen_opi(op);
}
/* generate a floating point operation 'v = t1 op t2' instruction. The
   two operands are guaranteed to have the same floating point type */
/* XXX: need to use ST1 too */
void gen_opf(int op)
{
    int a, ft, fc, swapped, r;
    int float_type =
        (vtop->type.t & VT_BTYPE) == VT_LDOUBLE ? RC_ST0 : RC_FLOAT;

    /* convert constants to memory references */
    if ((vtop[-1].r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
        vswap();
        gv(float_type);
        vswap();
    }
    if ((vtop[0].r & (VT_VALMASK | VT_LVAL)) == VT_CONST)
        gv(float_type);

    /* must put at least one value in the floating point register */
    if ((vtop[-1].r & VT_LVAL) &&
        (vtop[0].r & VT_LVAL)) {
        vswap();
        gv(float_type);
        vswap();
    }
    swapped = 0;
    /* swap the stack if needed so that t1 is the register and t2 is
       the memory reference */
    if (vtop[-1].r & VT_LVAL) {
        vswap();
        swapped = 1;
    }
    if ((vtop->type.t & VT_BTYPE) == VT_LDOUBLE) {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* load on stack second operand */
            load(TREG_ST0, vtop);
            save_reg(TREG_RAX); /* eax is used by FP comparison code */
            if (op == TOK_GE || op == TOK_GT)
                swapped = !swapped;
            else if (op == TOK_EQ || op == TOK_NE)
                swapped = 0;
            if (swapped)
                o(0xc9d9); /* fxch %st(1) */
            o(0xe9da); /* fucompp */
            o(0xe0df); /* fnstsw %ax */
            if (op == TOK_EQ) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40fC80); /* cmp $0x40, %ah */
            } else if (op == TOK_NE) {
                o(0x45e480); /* and $0x45, %ah */
                o(0x40f480); /* xor $0x40, %ah */
                op = TOK_NE;
            } else if (op == TOK_GE || op == TOK_LE) {
                o(0x05c4f6); /* test $0x05, %ah */
                op = TOK_EQ;
            } else {
                o(0x45c4f6); /* test $0x45, %ah */
                op = TOK_EQ;
            }
            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op;
        } else {
            /* no memory reference possible for long double operations */
            load(TREG_ST0, vtop);
            swapped = !swapped;

            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                if (swapped)
                    a++;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                if (swapped)
                    a++;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.ul;
            o(0xde); /* fxxxp %st, %st(1) */
            o(0xc1 + (a << 3));
            vtop--;
        }
    } else {
        if (op >= TOK_ULT && op <= TOK_GT) {
            /* if saved lvalue, then we must reload it */
            r = vtop->r;
            fc = vtop->c.ul;
            if ((r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            if (op == TOK_EQ || op == TOK_NE) {
                swapped = 0;
            } else {
                if (op == TOK_LE || op == TOK_LT)
                    swapped = !swapped;
                if (op == TOK_LE || op == TOK_GE) {
                    op = 0x93; /* setae */
                } else {
                    op = 0x97; /* seta */
                }
            }

            if (swapped) {
                gv(RC_FLOAT);
                vswap();
            }
            assert(!(vtop[-1].r & VT_LVAL));

            if ((vtop->type.t & VT_BTYPE) == VT_DOUBLE)
                o(0x66);
            o(0x2e0f); /* ucomisd */

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
            vtop->r = VT_CMP;
            vtop->c.i = op | 0x100;
        } else {
            assert((vtop->type.t & VT_BTYPE) != VT_LDOUBLE);
            switch(op) {
            default:
            case '+':
                a = 0;
                break;
            case '-':
                a = 4;
                break;
            case '*':
                a = 1;
                break;
            case '/':
                a = 6;
                break;
            }
            ft = vtop->type.t;
            fc = vtop->c.ul;
            assert((ft & VT_BTYPE) != VT_LDOUBLE);

            r = vtop->r;
            /* if saved lvalue, then we must reload it */
            if ((vtop->r & VT_VALMASK) == VT_LLOCAL) {
                SValue v1;
                r = get_reg(RC_INT);
                v1.type.t = VT_PTR;
                v1.r = VT_LOCAL | VT_LVAL;
                v1.c.ul = fc;
                load(r, &v1);
                fc = 0;
            }

            assert(!(vtop[-1].r & VT_LVAL));
            if (swapped) {
                assert(vtop->r & VT_LVAL);
                gv(RC_FLOAT);
                vswap();
            }

            if ((ft & VT_BTYPE) == VT_DOUBLE) {
                o(0xf2);
            } else {
                o(0xf3);
            }
            o(0x0f);
            o(0x58 + a);

            if (vtop->r & VT_LVAL) {
                gen_modrm(vtop[-1].r, r, vtop->sym, fc);
            } else {
                o(0xc0 + REG_VALUE(vtop[0].r) + REG_VALUE(vtop[-1].r)*8);
            }

            vtop--;
        }
    }
}
/* convert integers to fp 't' type. Must handle 'int', 'unsigned int'
   and 'long long' cases. */
void gen_cvt_itof(int t)
{
    if ((t & VT_BTYPE) == VT_LDOUBLE) {
        save_reg(TREG_ST0);
        gv(RC_INT);
        if ((vtop->type.t & VT_BTYPE) == VT_LLONG) {
            /* signed long long to float/double/long double (unsigned case
               is handled generically) */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        } else if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
                   (VT_INT | VT_UNSIGNED)) {
            /* unsigned int to float/double/long double */
            o(0x6a); /* push $0 */
            g(0x00);
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x242cdf); /* fildll (%rsp) */
            o(0x10c48348); /* add $16, %rsp */
        } else {
            /* int to float/double/long double */
            o(0x50 + (vtop->r & VT_VALMASK)); /* push r */
            o(0x2404db); /* fildl (%rsp) */
            o(0x08c48348); /* add $8, %rsp */
        }
        vtop->r = TREG_ST0;
    } else {
        int r = get_reg(RC_FLOAT);
        gv(RC_INT);
        o(0xf2 + ((t & VT_BTYPE) == VT_FLOAT?1:0));
        if ((vtop->type.t & (VT_BTYPE | VT_UNSIGNED)) ==
            (VT_INT | VT_UNSIGNED) ||
            (vtop->type.t & VT_BTYPE) == VT_LLONG) {
            o(0x48); /* REX */
        }
        o(0x2a0f);
        o(0xc0 + (vtop->r & VT_VALMASK) + REG_VALUE(r)*8); /* cvtsi2sd */
        vtop->r = r;
    }
}
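/* The float <-> long double conversions below spill through the 16
   bytes just below %rsp without adjusting the stack pointer; on
   System V targets this scratch space is covered by the 128-byte
   red zone. */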
/* convert from one floating point type to another */
void gen_cvt_ftof(int t)
{
    int ft, bt, tbt;

    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    tbt = t & VT_BTYPE;

    if (bt == VT_FLOAT) {
        gv(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0x140f); /* unpcklps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f); /* cvtps2pd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movss %xmm0,-0x10(%rsp) */
            o(0x110ff3);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444d9); /* flds -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else if (bt == VT_DOUBLE) {
        gv(RC_FLOAT);
        if (tbt == VT_FLOAT) {
            o(0x140f66); /* unpcklpd */
            o(0xc0 + REG_VALUE(vtop->r)*9);
            o(0x5a0f66); /* cvtpd2ps */
            o(0xc0 + REG_VALUE(vtop->r)*9);
        } else if (tbt == VT_LDOUBLE) {
            save_reg(TREG_ST0);
            /* movsd %xmm0,-0x10(%rsp) */
            o(0x110ff2);
            o(0x44 + REG_VALUE(vtop->r)*8);
            o(0xf024);
            o(0xf02444dd); /* fldl -0x10(%rsp) */
            vtop->r = TREG_ST0;
        }
    } else {
        int r;
        gv(RC_ST0);
        r = get_reg(RC_FLOAT);
        if (tbt == VT_DOUBLE) {
            o(0xf0245cdd); /* fstpl -0x10(%rsp) */
            /* movsd -0x10(%rsp),%xmm0 */
            o(0x100ff2);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        } else if (tbt == VT_FLOAT) {
            o(0xf0245cd9); /* fstps -0x10(%rsp) */
            /* movss -0x10(%rsp),%xmm0 */
            o(0x100ff3);
            o(0x44 + REG_VALUE(r)*8);
            o(0xf024);
            vtop->r = r;
        }
    }
}
/* convert fp to int 't' type */
void gen_cvt_ftoi(int t)
{
    int ft, bt, size, r;
    ft = vtop->type.t;
    bt = ft & VT_BTYPE;
    if (bt == VT_LDOUBLE) {
        gen_cvt_ftof(VT_DOUBLE);
        bt = VT_DOUBLE;
    }

    gv(RC_FLOAT);
    if (t != VT_INT)
        size = 8;
    else
        size = 4;

    r = get_reg(RC_INT);
    if (bt == VT_FLOAT) {
        o(0xf3);
    } else if (bt == VT_DOUBLE) {
        o(0xf2);
    } else {
        assert(0);
    }
    orex(size == 8, r, 0, 0x2c0f); /* cvttss2si or cvttsd2si */
    o(0xc0 + REG_VALUE(vtop->r) + REG_VALUE(r)*8);
    vtop->r = r;
}
/* computed goto support */
void ggoto(void)
{
    gcall_or_jmp(1);
    vtop--;
}
/* Save the stack pointer onto the stack and return the location of its address */
ST_FUNC void gen_vla_sp_save(int addr) {
    /* mov %rsp,addr(%rbp)*/
    gen_modrm64(0x89, TREG_RSP, VT_LOCAL, NULL, addr);
}

/* Restore the SP from a location on the stack */
ST_FUNC void gen_vla_sp_restore(int addr) {
    gen_modrm64(0x8b, TREG_RSP, VT_LOCAL, NULL, addr);
}
/* Subtract from the stack pointer, and push the resulting value onto the stack */
ST_FUNC void gen_vla_alloc(CType *type, int align) {
#ifdef TCC_TARGET_PE
    /* alloca does more than just adjust %rsp on Windows */
    vpush_global_sym(&func_old_type, TOK_alloca);
    vswap(); /* Move alloca ref past allocation size */
    gfunc_call(1);
    vset(type, REG_IRET, 0);
#else
    int r;
    r = gv(RC_INT); /* allocation size */
    /* sub r,%rsp */
    o(0x2b48);
    o(0xe0 | REG_VALUE(r));
    /* We align to 16 bytes rather than align */
    /* and ~15, %rsp */
    o(0xf0e48348);
    /* mov %rsp, r */
    o(0x8948);
    o(0xe0 | REG_VALUE(r));
    vpop();
    vset(type, r, 0);
#endif
}
/* end of x86-64 code generator */
/*************************************************************/
#endif /* ! TARGET_DEFS_ONLY */
/******************************************************/